Merge "Inline field and method resolution." into dalvik-dev
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4c658a2..655c7dd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -17,6 +17,7 @@
LOCAL_PATH := art
TEST_COMMON_SRC_FILES := \
+ compiler/dex/arena_allocator_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
compiler/image_test.cc \
@@ -27,6 +28,7 @@
compiler/utils/arm/managed_register_arm_test.cc \
compiler/utils/x86/managed_register_x86_test.cc \
runtime/barrier_test.cc \
+ runtime/base/bit_vector_test.cc \
runtime/base/histogram_test.cc \
runtime/base/mutex_test.cc \
runtime/base/timing_logger_test.cc \
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 2da8064..95e44b3 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -50,7 +50,9 @@
map_(nullptr),
next_(nullptr) {
if (kUseMemMap) {
- map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE);
+ std::string error_msg;
+ map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, &error_msg);
+ CHECK(map_ != nullptr) << error_msg;
memory_ = map_->Begin();
size_ = map_->Size();
} else {
diff --git a/compiler/dex/arena_allocator_test.cc b/compiler/dex/arena_allocator_test.cc
new file mode 100644
index 0000000..63dc615
--- /dev/null
+++ b/compiler/dex/arena_allocator_test.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arena_allocator.h"
+#include "arena_bit_vector.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ArenaAllocator, Test) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ ArenaBitVector bv(&arena, 10, true);
+ bv.SetBit(5);
+ EXPECT_EQ(1U, bv.GetStorageSize());
+ bv.SetBit(35);
+ EXPECT_EQ(2U, bv.GetStorageSize());
+}
+
+} // namespace art
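A note on the expected sizes in the test above: BitVector storage is counted in 32-bit words, so a 10-bit vector starts at one word and setting bit 35 grows it to two (word index = bit >> 5; rounded-up word count = (bits + 31) >> 5, the same arithmetic as the resize path removed from arena_bit_vector.cc below). A standalone sketch of that arithmetic, independent of the ART classes:

    #include <cassert>
    #include <cstdint>

    // Number of 32-bit storage words needed to hold bits [0, num_bits).
    static uint32_t WordsForBits(uint32_t num_bits) {
      return (num_bits + 31) >> 5;
    }

    int main() {
      assert(WordsForBits(10) == 1u);      // initial 10-bit vector: one word
      assert(WordsForBits(35 + 1) == 2u);  // expanding to cover bit 35: two words
      return 0;
    }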
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
index b921f61..b567ae8 100644
--- a/compiler/dex/arena_bit_vector.cc
+++ b/compiler/dex/arena_bit_vector.cc
@@ -19,119 +19,29 @@
namespace art {
-// TODO: profile to make sure this is still a win relative to just using shifted masks.
-static uint32_t check_masks[32] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
- 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
- 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
- 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
- 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
- 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
- 0x40000000, 0x80000000 };
+class ArenaBitVectorAllocator : public Allocator {
+ public:
+ explicit ArenaBitVectorAllocator(ArenaAllocator* arena) : arena_(arena) {}
+ ~ArenaBitVectorAllocator() {}
+
+ virtual void* Alloc(size_t size) {
+ return arena_->Alloc(size, ArenaAllocator::kAllocGrowableBitMap);
+ }
+
+ virtual void Free(void*) {} // Nop.
+
+ static void* operator new(size_t size, ArenaAllocator* arena) {
+ return arena->Alloc(sizeof(ArenaBitVectorAllocator), ArenaAllocator::kAllocGrowableBitMap);
+ }
+ static void operator delete(void* p) {} // Nop.
+
+ private:
+ ArenaAllocator* arena_;
+ DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
+};
ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
bool expandable, OatBitMapKind kind)
- : arena_(arena),
- expandable_(expandable),
- kind_(kind),
- storage_size_((start_bits + 31) >> 5),
- storage_(static_cast<uint32_t*>(arena_->Alloc(storage_size_ * sizeof(uint32_t),
- ArenaAllocator::kAllocGrowableBitMap))) {
- DCHECK_EQ(sizeof(storage_[0]), 4U); // Assuming 32-bit units.
-}
-
-/*
- * Determine whether or not the specified bit is set.
- */
-bool ArenaBitVector::IsBitSet(unsigned int num) {
- DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
-
- unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f];
- return (val != 0);
-}
-
-// Mark all bits bit as "clear".
-void ArenaBitVector::ClearAllBits() {
- memset(storage_, 0, storage_size_ * sizeof(uint32_t));
-}
-
-// Mark the specified bit as "set".
-/*
- * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
- * not using it badly or change resize mechanism.
- */
-void ArenaBitVector::SetBit(unsigned int num) {
- if (num >= storage_size_ * sizeof(uint32_t) * 8) {
- DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
-
- /* Round up to word boundaries for "num+1" bits */
- unsigned int new_size = (num + 1 + 31) >> 5;
- DCHECK_GT(new_size, storage_size_);
- uint32_t *new_storage =
- static_cast<uint32_t*>(arena_->Alloc(new_size * sizeof(uint32_t),
- ArenaAllocator::kAllocGrowableBitMap));
- memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
- // Zero out the new storage words.
- memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
- // TOTO: collect stats on space wasted because of resize.
- storage_ = new_storage;
- storage_size_ = new_size;
- }
-
- storage_[num >> 5] |= check_masks[num & 0x1f];
-}
-
-// Mark the specified bit as "unset".
-void ArenaBitVector::ClearBit(unsigned int num) {
- DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
- storage_[num >> 5] &= ~check_masks[num & 0x1f];
-}
-
-// Intersect with another bit vector. Sizes and expandability must be the same.
-void ArenaBitVector::Intersect(const ArenaBitVector* src) {
- DCHECK_EQ(storage_size_, src->GetStorageSize());
- DCHECK_EQ(expandable_, src->IsExpandable());
- for (unsigned int idx = 0; idx < storage_size_; idx++) {
- storage_[idx] &= src->GetRawStorageWord(idx);
- }
-}
-
-/*
- * Union with another bit vector. Sizes and expandability must be the same.
- */
-void ArenaBitVector::Union(const ArenaBitVector* src) {
- DCHECK_EQ(storage_size_, src->GetStorageSize());
- DCHECK_EQ(expandable_, src->IsExpandable());
- for (unsigned int idx = 0; idx < storage_size_; idx++) {
- storage_[idx] |= src->GetRawStorageWord(idx);
- }
-}
-
-// Count the number of bits that are set.
-int ArenaBitVector::NumSetBits() {
- unsigned int count = 0;
-
- for (unsigned int word = 0; word < storage_size_; word++) {
- count += __builtin_popcount(storage_[word]);
- }
- return count;
-}
-
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
-void ArenaBitVector::SetInitialBits(unsigned int num_bits) {
- DCHECK_LE(((num_bits + 31) >> 5), storage_size_);
- unsigned int idx;
- for (idx = 0; idx < (num_bits >> 5); idx++) {
- storage_[idx] = -1;
- }
- unsigned int rem_num_bits = num_bits & 0x1f;
- if (rem_num_bits) {
- storage_[idx] = (1 << rem_num_bits) - 1;
- }
-}
+ : BitVector(start_bits, expandable, new (arena) ArenaBitVectorAllocator(arena)), kind_(kind) {}
} // namespace art
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 24a7ce9..4b2193a 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -17,109 +17,28 @@
#ifndef ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_DEX_ARENA_BIT_VECTOR_H_
-#include <stdint.h>
-#include <stddef.h>
-#include "compiler_enums.h"
#include "arena_allocator.h"
+#include "base/bit_vector.h"
+#include "compiler_enums.h"
namespace art {
/*
- * Expanding bitmap, used for tracking resources. Bits are numbered starting
- * from zero. All operations on a BitVector are unsynchronized.
+ * A BitVector implementation that uses Arena allocation.
*/
-class ArenaBitVector {
+class ArenaBitVector : public BitVector {
public:
- class Iterator {
- public:
- explicit Iterator(ArenaBitVector* bit_vector)
- : p_bits_(bit_vector),
- bit_storage_(bit_vector->GetRawStorage()),
- bit_index_(0),
- bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
-
- // Return the position of the next set bit. -1 means end-of-element reached.
- int Next() {
- // Did anything obviously change since we started?
- DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
- DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
-
- if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
-
- uint32_t word_index = bit_index_ / 32;
- uint32_t word = bit_storage_[word_index];
- // Mask out any bits in the first word we've already considered.
- word >>= bit_index_ & 0x1f;
- if (word == 0) {
- bit_index_ &= ~0x1f;
- do {
- word_index++;
- if (UNLIKELY((word_index * 32) >= bit_size_)) {
- bit_index_ = bit_size_;
- return -1;
- }
- word = bit_storage_[word_index];
- bit_index_ += 32;
- } while (word == 0);
- }
- bit_index_ += CTZ(word) + 1;
- return bit_index_ - 1;
- }
-
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector::Iterator),
- ArenaAllocator::kAllocGrowableBitMap);
- };
- static void operator delete(void* p) {} // Nop.
-
- private:
- ArenaBitVector* const p_bits_;
- uint32_t* const bit_storage_;
- uint32_t bit_index_; // Current index (size in bits).
- const uint32_t bit_size_; // Size of vector in bits.
- };
-
- ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, bool expandable,
+ ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
- void SetBit(unsigned int num);
- void ClearBit(unsigned int num);
- void MarkAllBits(bool set);
- void DebugBitVector(char* msg, int length);
- bool IsBitSet(unsigned int num);
- void ClearAllBits();
- void SetInitialBits(unsigned int num_bits);
- void Copy(ArenaBitVector* src) {
- memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
- }
- void Intersect(const ArenaBitVector* src2);
- void Union(const ArenaBitVector* src);
- // Are we equal to another bit vector? Note: expandability attributes must also match.
- bool Equal(const ArenaBitVector* src) {
- return (storage_size_ == src->GetStorageSize()) &&
- (expandable_ == src->IsExpandable()) &&
- (memcmp(storage_, src->GetRawStorage(), storage_size_ * 4) == 0);
- }
- int NumSetBits();
-
- uint32_t GetStorageSize() const { return storage_size_; }
- bool IsExpandable() const { return expandable_; }
- uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
- uint32_t* GetRawStorage() { return storage_; }
- const uint32_t* GetRawStorage() const { return storage_; }
+ static void* operator new(size_t size, ArenaAllocator* arena) {
+ return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
+ }
+ static void operator delete(void* p) {} // Nop.
private:
- ArenaAllocator* const arena_;
- const bool expandable_; // expand bitmap if we run out?
- const OatBitMapKind kind_; // for memory use tuning.
- uint32_t storage_size_; // current size, in 32-bit words.
- uint32_t* storage_;
+ const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
};
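The two hunks above reduce ArenaBitVector to a thin subclass of the shared BitVector, supplying arena allocation through an Allocator adapter instead of duplicating the bit operations. A minimal, self-contained sketch of that adapter pattern (simplified names, not the actual ART classes):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    // Simplified stand-in for the Allocator interface used by base/bit_vector.h.
    class Allocator {
     public:
      virtual ~Allocator() {}
      virtual void* Alloc(size_t size) = 0;
      virtual void Free(void* p) = 0;
    };

    // Stand-in for an arena: individual frees are no-ops and all memory is
    // released when the arena itself is destroyed.
    class Arena {
     public:
      ~Arena() { for (void* p : blocks_) std::free(p); }
      void* Alloc(size_t size) {
        void* p = std::malloc(size);
        blocks_.push_back(p);
        return p;
      }
     private:
      std::vector<void*> blocks_;
    };

    // Adapter in the spirit of ArenaBitVectorAllocator: Alloc() goes to the
    // arena, Free() is a no-op, and the bit vector never needs to know it is
    // arena-backed.
    class ArenaAdapter : public Allocator {
     public:
      explicit ArenaAdapter(Arena* arena) : arena_(arena) {}
      void* Alloc(size_t size) override { return arena_->Alloc(size); }
      void Free(void*) override {}  // Nop: the arena owns the memory.
     private:
      Arena* const arena_;
    };

    int main() {
      Arena arena;
      ArenaAdapter alloc(&arena);
      uint32_t* storage = static_cast<uint32_t*>(alloc.Alloc(2 * sizeof(uint32_t)));
      storage[0] = storage[1] = 0u;
      alloc.Free(storage);  // No-op; the block lives until the arena dies.
      return 0;
    }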
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 17b5bb5..6ea21fc 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -55,6 +55,7 @@
};
enum BBType {
+ kNullBlock,
kEntryBlock,
kDalvikByteCode,
kExitBlock,
@@ -180,6 +181,8 @@
kOpBic,
kOpCmn,
kOpTst,
+ kOpRev,
+ kOpRevsh,
kOpBkpt,
kOpBlx,
kOpPush,
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 6607562..0d7209e 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -29,6 +29,7 @@
#include "llvm/intrinsic_helper.h"
#include "llvm/ir_builder.h"
#include "safe_map.h"
+#include "base/timing_logger.h"
namespace art {
@@ -68,7 +69,14 @@
compiler_flip_match(false),
arena(pool),
mir_graph(NULL),
- cg(NULL) {}
+ cg(NULL),
+ timings("QuickCompiler", true, false) {
+ }
+
+ void StartTimingSplit(const char* label);
+ void NewTimingSplit(const char* label);
+ void EndTiming();
+
/*
* Fields needed/generated by common frontend and generally used throughout
* the compiler.
@@ -90,14 +98,14 @@
InstructionSet instruction_set;
// TODO: much of this info available elsewhere. Go to the original source?
- int num_dalvik_registers; // method->registers_size.
+ uint16_t num_dalvik_registers; // method->registers_size.
const uint16_t* insns;
- int num_ins;
- int num_outs;
- int num_regs; // Unlike num_dalvik_registers, does not include ins.
+ uint16_t num_ins;
+ uint16_t num_outs;
+ uint16_t num_regs; // Unlike num_dalvik_registers, does not include ins.
// TODO: may want to move this to MIRGraph.
- int num_compiler_temps;
+ uint16_t num_compiler_temps;
// If non-empty, apply optimizer/debug flags only to matching methods.
std::string compiler_method_match;
@@ -109,6 +117,7 @@
UniquePtr<MIRGraph> mir_graph; // MIR container.
UniquePtr<Backend> cg; // Target-specific codegen.
+ base::TimingLogger timings;
};
} // namespace art
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 236c6f4..74f36dd 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -25,7 +25,7 @@
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
BasicBlock* res = NULL;
if (idx_ < end_idx_) {
- int bb_id = block_id_list_->Get(idx_++);
+ BasicBlockId bb_id = block_id_list_->Get(idx_++);
res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
@@ -40,7 +40,7 @@
changed_ = false;
}
if (idx_ < end_idx_) {
- int bb_id = block_id_list_->Get(idx_++);
+ BasicBlockId bb_id = block_id_list_->Get(idx_++);
res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
@@ -50,7 +50,7 @@
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
BasicBlock* res = NULL;
if (idx_ >= 0) {
- int bb_id = block_id_list_->Get(idx_--);
+ BasicBlockId bb_id = block_id_list_->Get(idx_--);
res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
@@ -65,7 +65,7 @@
changed_ = false;
}
if (idx_ >= 0) {
- int bb_id = block_id_list_->Get(idx_--);
+ BasicBlockId bb_id = block_id_list_->Get(idx_--);
res = mir_graph_->GetBasicBlock(bb_id);
}
return res;
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 1dab54e..26e3665 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -39,7 +39,7 @@
virtual ~DataflowIterator() {}
protected:
- DataflowIterator(MIRGraph* mir_graph, int start_idx, int end_idx)
+ DataflowIterator(MIRGraph* mir_graph, int32_t start_idx, int32_t end_idx)
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
@@ -53,10 +53,10 @@
virtual BasicBlock* ReverseRepeatNext(bool had_change) ALWAYS_INLINE;
MIRGraph* const mir_graph_;
- const int start_idx_;
- const int end_idx_;
- GrowableArray<int>* block_id_list_;
- int idx_;
+ const int32_t start_idx_;
+ const int32_t end_idx_;
+ GrowableArray<BasicBlockId>* block_id_list_;
+ int32_t idx_;
bool changed_;
}; // DataflowIterator
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 2952570..2f8521f 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -24,6 +24,7 @@
#include "runtime.h"
#include "backend.h"
#include "base/logging.h"
+#include "base/timing_logger.h"
#if defined(ART_USE_PORTABLE_COMPILER)
#include "dex/portable/mir_to_gbc.h"
@@ -104,8 +105,30 @@
// (1 << kDebugVerifyBitcode) |
// (1 << kDebugShowSummaryMemoryUsage) |
// (1 << kDebugShowFilterStats) |
+ // (1 << kDebugTimings) |
0;
+// TODO: Add a cumulative version of logging, and combine with dex2oat --dump-timing
+void CompilationUnit::StartTimingSplit(const char* label) {
+ if (enable_debug & (1 << kDebugTimings)) {
+ timings.StartSplit(label);
+ }
+}
+
+void CompilationUnit::NewTimingSplit(const char* label) {
+ if (enable_debug & (1 << kDebugTimings)) {
+ timings.NewSplit(label);
+ }
+}
+
+void CompilationUnit::EndTiming() {
+ if (enable_debug & (1 << kDebugTimings)) {
+ timings.EndSplit();
+ LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
+ LOG(INFO) << Dumpable<base::TimingLogger>(timings);
+ }
+}
+
static CompiledMethod* CompileMethod(CompilerDriver& compiler,
const CompilerBackend compiler_backend,
const DexFile::CodeItem* code_item,
@@ -175,6 +198,7 @@
(1 << kPromoteCompilerTemps));
}
+ cu.StartTimingSplit("BuildMIRGraph");
cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
/* Gathering opcode stats? */
@@ -192,22 +216,28 @@
}
#endif
+ cu.NewTimingSplit("MIROpt:CodeLayout");
+
/* Do a code layout pass */
cu.mir_graph->CodeLayout();
/* Perform SSA transformation for the whole method */
+ cu.NewTimingSplit("MIROpt:SSATransform");
cu.mir_graph->SSATransformation();
/* Do constant propagation */
+ cu.NewTimingSplit("MIROpt:ConstantProp");
cu.mir_graph->PropagateConstants();
/* Count uses */
cu.mir_graph->MethodUseCount();
/* Perform null check elimination */
+ cu.NewTimingSplit("MIROpt:NullCheckElimination");
cu.mir_graph->NullCheckElimination();
/* Combine basic blocks where possible */
+ cu.NewTimingSplit("MIROpt:BBOpt");
cu.mir_graph->BasicBlockCombine();
/* Do some basic block optimizations */
@@ -250,6 +280,7 @@
cu.cg->Materialize();
+ cu.NewTimingSplit("Cleanup");
result = cu.cg->GetCompiledMethod();
if (result) {
@@ -270,6 +301,7 @@
<< " " << PrettyMethod(method_idx, dex_file);
}
+ cu.EndTiming();
return result;
}
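The frontend change above threads a base::TimingLogger through the compilation phases, gated on the new kDebugTimings flag so it costs nothing unless enabled. A rough sketch of the same start/new/end split pattern, using a toy timer rather than ART's TimingLogger:

    #include <chrono>
    #include <cstdio>
    #include <string>

    // Toy stand-in for base::TimingLogger: each split runs from StartSplit or
    // NewSplit until the next NewSplit or EndSplit.
    class PhaseTimer {
     public:
      void StartSplit(const char* label) { label_ = label; start_ = Clock::now(); }
      void NewSplit(const char* label) { Report(); StartSplit(label); }
      void EndSplit() { Report(); }
     private:
      using Clock = std::chrono::steady_clock;
      void Report() {
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
            Clock::now() - start_).count();
        std::printf("%s: %lld us\n", label_.c_str(), static_cast<long long>(us));
      }
      std::string label_;
      Clock::time_point start_;
    };

    int main() {
      PhaseTimer timings;
      timings.StartSplit("BuildMIRGraph");    // phase names taken from the hunk above
      timings.NewSplit("MIROpt:CodeLayout");
      timings.NewSplit("MIROpt:SSATransform");
      timings.EndSplit();
      return 0;
    }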
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 6c33d10..43f6855 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -78,6 +78,7 @@
kDebugVerifyBitcode,
kDebugShowSummaryMemoryUsage,
kDebugShowFilterStats,
+ kDebugTimings
};
class LLVMInfo {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 8597172..89af06e 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -864,7 +864,7 @@
if (ending_bb->last_mir_insn != NULL) {
uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & AN_BRANCH) == 0) {
- ending_bb = ending_bb->fall_through;
+ ending_bb = GetBasicBlock(ending_bb->fall_through);
ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
}
}
@@ -876,13 +876,14 @@
*/
int loop_scale_factor = 1;
// Simple for and while loops
- if ((ending_bb->taken != NULL) && (ending_bb->fall_through == NULL)) {
- if ((ending_bb->taken->taken == bb) || (ending_bb->taken->fall_through == bb)) {
+ if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
+ if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
+ (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
loop_scale_factor = 25;
}
}
// Simple do-while loop
- if ((ending_bb->taken != NULL) && (ending_bb->taken == bb)) {
+ if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
loop_scale_factor = 25;
}
@@ -922,7 +923,7 @@
if (tbb == ending_bb) {
done = true;
} else {
- tbb = tbb->fall_through;
+ tbb = GetBasicBlock(tbb->fall_through);
}
}
if (has_math && computational_block && (loop_scale_factor > 1)) {
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 3d29908..9c8ce23 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1295,23 +1295,23 @@
/* Verify that all the successors are connected with all the claimed predecessors */
bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
- GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+ GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
while (true) {
- BasicBlock *pred_bb = iter.Next();
+ BasicBlock *pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) break;
bool found = false;
- if (pred_bb->taken == bb) {
+ if (pred_bb->taken == bb->id) {
found = true;
- } else if (pred_bb->fall_through == bb) {
+ } else if (pred_bb->fall_through == bb->id) {
found = true;
- } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_block_list.blocks);
+ } else if (pred_bb->successor_block_list_type != kNotUsed) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_blocks);
while (true) {
SuccessorBlockInfo *successor_block_info = iterator.Next();
if (successor_block_info == NULL) break;
- BasicBlock *succ_bb = successor_block_info->block;
- if (succ_bb == bb) {
+ BasicBlockId succ_bb = successor_block_info->block;
+ if (succ_bb == bb->id) {
found = true;
break;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index fb306de..cf758fc 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -130,11 +130,14 @@
/* Split an existing block from the specified code offset into two */
-BasicBlock* MIRGraph::SplitBlock(unsigned int code_offset,
+BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
+ DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
+ MIR* prev = NULL;
while (insn) {
if (insn->offset == code_offset) break;
+ prev = insn;
insn = insn->next;
}
if (insn == NULL) {
@@ -156,39 +159,42 @@
/* Handle the taken path */
bottom_block->taken = orig_block->taken;
- if (bottom_block->taken) {
- orig_block->taken = NULL;
- bottom_block->taken->predecessors->Delete(orig_block);
- bottom_block->taken->predecessors->Insert(bottom_block);
+ if (bottom_block->taken != NullBasicBlockId) {
+ orig_block->taken = NullBasicBlockId;
+ BasicBlock* bb_taken = GetBasicBlock(bottom_block->taken);
+ bb_taken->predecessors->Delete(orig_block->id);
+ bb_taken->predecessors->Insert(bottom_block->id);
}
/* Handle the fallthrough path */
bottom_block->fall_through = orig_block->fall_through;
- orig_block->fall_through = bottom_block;
- bottom_block->predecessors->Insert(orig_block);
- if (bottom_block->fall_through) {
- bottom_block->fall_through->predecessors->Delete(orig_block);
- bottom_block->fall_through->predecessors->Insert(bottom_block);
+ orig_block->fall_through = bottom_block->id;
+ bottom_block->predecessors->Insert(orig_block->id);
+ if (bottom_block->fall_through != NullBasicBlockId) {
+ BasicBlock* bb_fall_through = GetBasicBlock(bottom_block->fall_through);
+ bb_fall_through->predecessors->Delete(orig_block->id);
+ bb_fall_through->predecessors->Insert(bottom_block->id);
}
/* Handle the successor list */
- if (orig_block->successor_block_list.block_list_type != kNotUsed) {
- bottom_block->successor_block_list = orig_block->successor_block_list;
- orig_block->successor_block_list.block_list_type = kNotUsed;
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
+ if (orig_block->successor_block_list_type != kNotUsed) {
+ bottom_block->successor_block_list_type = orig_block->successor_block_list_type;
+ bottom_block->successor_blocks = orig_block->successor_blocks;
+ orig_block->successor_block_list_type = kNotUsed;
+ orig_block->successor_blocks = NULL;
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_blocks);
while (true) {
SuccessorBlockInfo *successor_block_info = iterator.Next();
if (successor_block_info == NULL) break;
- BasicBlock *bb = successor_block_info->block;
- bb->predecessors->Delete(orig_block);
- bb->predecessors->Insert(bottom_block);
+ BasicBlock *bb = GetBasicBlock(successor_block_info->block);
+ bb->predecessors->Delete(orig_block->id);
+ bb->predecessors->Insert(bottom_block->id);
}
}
- orig_block->last_mir_insn = insn->prev;
+ orig_block->last_mir_insn = prev;
+ prev->next = NULL;
- insn->prev->next = NULL;
- insn->prev = NULL;
/*
* Update the immediate predecessor block pointer so that outgoing edges
* can be applied to the proper block.
@@ -225,7 +231,7 @@
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock* MIRGraph::FindBlock(unsigned int code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
BasicBlock** immed_pred_block_p) {
if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
return NULL;
@@ -261,7 +267,7 @@
/* Identify code range in try blocks and set up the empty catch blocks */
void MIRGraph::ProcessTryCatchBlocks() {
int tries_size = current_code_item_->tries_size_;
- int offset;
+ DexOffset offset;
if (tries_size == 0) {
return;
@@ -270,8 +276,8 @@
for (int i = 0; i < tries_size; i++) {
const DexFile::TryItem* pTry =
DexFile::GetTryItems(*current_code_item_, i);
- int start_offset = pTry->start_addr_;
- int end_offset = start_offset + pTry->insn_count_;
+ DexOffset start_offset = pTry->start_addr_;
+ DexOffset end_offset = start_offset + pTry->insn_count_;
for (offset = start_offset; offset < end_offset; offset++) {
try_block_addr_->SetBit(offset);
}
@@ -292,10 +298,10 @@
}
/* Process instructions with the kBranch flag */
-BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
- int flags, const uint16_t* code_ptr,
+BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+ int width, int flags, const uint16_t* code_ptr,
const uint16_t* code_end) {
- int target = cur_offset;
+ DexOffset target = cur_offset;
switch (insn->dalvikInsn.opcode) {
case Instruction::GOTO:
case Instruction::GOTO_16:
@@ -326,8 +332,8 @@
CountBranch(target);
BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
/* immed_pred_block_p */ &cur_block);
- cur_block->taken = taken_block;
- taken_block->predecessors->Insert(cur_block);
+ cur_block->taken = taken_block->id;
+ taken_block->predecessors->Insert(cur_block->id);
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
@@ -349,8 +355,8 @@
true,
/* immed_pred_block_p */
&cur_block);
- cur_block->fall_through = fallthrough_block;
- fallthrough_block->predecessors->Insert(cur_block);
+ cur_block->fall_through = fallthrough_block->id;
+ fallthrough_block->predecessors->Insert(cur_block->id);
} else if (code_ptr < code_end) {
FindBlock(cur_offset + width, /* split */ false, /* create */ true,
/* immed_pred_block_p */ NULL);
@@ -359,7 +365,7 @@
}
/* Process instructions with the kSwitch flag */
-void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+void MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags) {
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
@@ -403,14 +409,13 @@
first_key = 0; // To make the compiler happy
}
- if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+ if (cur_block->successor_block_list_type != kNotUsed) {
LOG(FATAL) << "Successor block list already in use: "
- << static_cast<int>(cur_block->successor_block_list.block_list_type);
+ << static_cast<int>(cur_block->successor_block_list_type);
}
- cur_block->successor_block_list.block_list_type =
- (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
- kPackedSwitch : kSparseSwitch;
- cur_block->successor_block_list.blocks =
+ cur_block->successor_block_list_type =
+ (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ? kPackedSwitch : kSparseSwitch;
+ cur_block->successor_blocks =
new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
for (i = 0; i < size; i++) {
@@ -419,24 +424,24 @@
SuccessorBlockInfo *successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
ArenaAllocator::kAllocSuccessor));
- successor_block_info->block = case_block;
+ successor_block_info->block = case_block->id;
successor_block_info->key =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
first_key + i : keyTable[i];
- cur_block->successor_block_list.blocks->Insert(successor_block_info);
- case_block->predecessors->Insert(cur_block);
+ cur_block->successor_blocks->Insert(successor_block_info);
+ case_block->predecessors->Insert(cur_block->id);
}
/* Fall-through case */
BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
/* create */ true, /* immed_pred_block_p */ NULL);
- cur_block->fall_through = fallthrough_block;
- fallthrough_block->predecessors->Insert(cur_block);
+ cur_block->fall_through = fallthrough_block->id;
+ fallthrough_block->predecessors->Insert(cur_block->id);
}
/* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
- int flags, ArenaBitVector* try_block_addr,
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
+ int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
@@ -444,14 +449,14 @@
if (in_try_block) {
CatchHandlerIterator iterator(*current_code_item_, cur_offset);
- if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+ if (cur_block->successor_block_list_type != kNotUsed) {
LOG(INFO) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
LOG(FATAL) << "Successor block list already in use: "
- << static_cast<int>(cur_block->successor_block_list.block_list_type);
+ << static_cast<int>(cur_block->successor_block_list_type);
}
- cur_block->successor_block_list.block_list_type = kCatch;
- cur_block->successor_block_list.blocks =
+ cur_block->successor_block_list_type = kCatch;
+ cur_block->successor_blocks =
new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
for (; iterator.HasNext(); iterator.Next()) {
@@ -463,17 +468,17 @@
}
SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
(arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
- successor_block_info->block = catch_block;
+ successor_block_info->block = catch_block->id;
successor_block_info->key = iterator.GetHandlerTypeIndex();
- cur_block->successor_block_list.blocks->Insert(successor_block_info);
- catch_block->predecessors->Insert(cur_block);
+ cur_block->successor_blocks->Insert(successor_block_info);
+ catch_block->predecessors->Insert(cur_block->id);
}
} else {
BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
- cur_block->taken = eh_block;
+ cur_block->taken = eh_block->id;
block_list_.Insert(eh_block);
eh_block->start_offset = cur_offset;
- eh_block->predecessors->Insert(cur_block);
+ eh_block->predecessors->Insert(cur_block->id);
}
if (insn->dalvikInsn.opcode == Instruction::THROW) {
@@ -509,8 +514,8 @@
BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
block_list_.Insert(new_block);
new_block->start_offset = insn->offset;
- cur_block->fall_through = new_block;
- new_block->predecessors->Insert(cur_block);
+ cur_block->fall_through = new_block->id;
+ new_block->predecessors->Insert(cur_block->id);
MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
*new_insn = *insn;
insn->dalvikInsn.opcode =
@@ -551,9 +556,14 @@
DCHECK(entry_block_ == NULL);
DCHECK(exit_block_ == NULL);
DCHECK_EQ(num_blocks_, 0);
+ // Use id 0 to represent a null block.
+ BasicBlock* null_block = NewMemBB(kNullBlock, num_blocks_++);
+ DCHECK_EQ(null_block->id, NullBasicBlockId);
+ null_block->hidden = true;
+ block_list_.Insert(null_block);
entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
- exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
block_list_.Insert(entry_block_);
+ exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
block_list_.Insert(exit_block_);
// TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
cu_->dex_file = &dex_file;
@@ -578,12 +588,12 @@
/* Current block to record parsed instructions */
BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
- DCHECK_EQ(current_offset_, 0);
+ DCHECK_EQ(current_offset_, 0U);
cur_block->start_offset = current_offset_;
block_list_.Insert(cur_block);
- // FIXME: this needs to insert at the insert point rather than entry block.
- entry_block_->fall_through = cur_block;
- cur_block->predecessors->Insert(entry_block_);
+ // TODO: for inlining support, insert at the insert point rather than entry block.
+ entry_block_->fall_through = cur_block->id;
+ cur_block->predecessors->Insert(entry_block_->id);
/* Identify code range in try blocks and set up the empty catch blocks */
ProcessTryCatchBlocks();
@@ -648,8 +658,8 @@
// It is a simple nop - treat normally.
AppendMIR(cur_block, insn);
} else {
- DCHECK(cur_block->fall_through == NULL);
- DCHECK(cur_block->taken == NULL);
+ DCHECK(cur_block->fall_through == NullBasicBlockId);
+ DCHECK(cur_block->taken == NullBasicBlockId);
// Unreachable instruction, mark for no continuation.
flags &= ~Instruction::kContinue;
}
@@ -667,8 +677,8 @@
width, flags, code_ptr, code_end);
} else if (flags & Instruction::kReturn) {
cur_block->terminated_by_return = true;
- cur_block->fall_through = exit_block_;
- exit_block_->predecessors->Insert(cur_block);
+ cur_block->fall_through = exit_block_->id;
+ exit_block_->predecessors->Insert(cur_block->id);
/*
* Terminate the current block if there are instructions
* afterwards.
@@ -697,13 +707,13 @@
* instruction is not an unconditional branch, connect them through
* the fall-through link.
*/
- DCHECK(cur_block->fall_through == NULL ||
- cur_block->fall_through == next_block ||
- cur_block->fall_through == exit_block_);
+ DCHECK(cur_block->fall_through == NullBasicBlockId ||
+ GetBasicBlock(cur_block->fall_through) == next_block ||
+ GetBasicBlock(cur_block->fall_through) == exit_block_);
- if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
- cur_block->fall_through = next_block;
- next_block->predecessors->Insert(cur_block);
+ if ((cur_block->fall_through == NullBasicBlockId) && (flags & Instruction::kContinue)) {
+ cur_block->fall_through = next_block->id;
+ next_block->predecessors->Insert(cur_block->id);
}
cur_block = next_block;
}
@@ -735,7 +745,7 @@
std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
ReplaceSpecialChars(fname);
fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
- GetEntryBlock()->fall_through->start_offset);
+ GetBasicBlock(GetEntryBlock()->fall_through)->start_offset);
file = fopen(fname.c_str(), "w");
if (file == NULL) {
return;
@@ -782,31 +792,30 @@
char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
- if (bb->taken) {
+ if (bb->taken != NullBasicBlockId) {
GetBlockName(bb, block_name1);
- GetBlockName(bb->taken, block_name2);
+ GetBlockName(GetBasicBlock(bb->taken), block_name2);
fprintf(file, " %s:s -> %s:n [style=dotted]\n",
block_name1, block_name2);
}
- if (bb->fall_through) {
+ if (bb->fall_through != NullBasicBlockId) {
GetBlockName(bb, block_name1);
- GetBlockName(bb->fall_through, block_name2);
+ GetBlockName(GetBasicBlock(bb->fall_through), block_name2);
fprintf(file, " %s:s -> %s:n\n", block_name1, block_name2);
}
- if (bb->successor_block_list.block_list_type != kNotUsed) {
+ if (bb->successor_block_list_type != kNotUsed) {
fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n",
bb->start_offset, bb->id,
- (bb->successor_block_list.block_list_type == kCatch) ?
- "Mrecord" : "record");
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+ (bb->successor_block_list_type == kCatch) ? "Mrecord" : "record");
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
SuccessorBlockInfo *successor_block_info = iterator.Next();
int succ_id = 0;
while (true) {
if (successor_block_info == NULL) break;
- BasicBlock *dest_block = successor_block_info->block;
+ BasicBlock *dest_block = GetBasicBlock(successor_block_info->block);
SuccessorBlockInfo *next_successor_block_info = iterator.Next();
fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
@@ -823,16 +832,16 @@
fprintf(file, " %s:s -> succ%04x_%d:n [style=dashed]\n",
block_name1, bb->start_offset, bb->id);
- if (bb->successor_block_list.block_list_type == kPackedSwitch ||
- bb->successor_block_list.block_list_type == kSparseSwitch) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+ if (bb->successor_block_list_type == kPackedSwitch ||
+ bb->successor_block_list_type == kSparseSwitch) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
succ_id = 0;
while (true) {
SuccessorBlockInfo *successor_block_info = iter.Next();
if (successor_block_info == NULL) break;
- BasicBlock *dest_block = successor_block_info->block;
+ BasicBlock* dest_block = GetBasicBlock(successor_block_info->block);
GetBlockName(dest_block, block_name2);
fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
@@ -848,7 +857,7 @@
fprintf(file, " cfg%s [label=\"%s\", shape=none];\n",
block_name1, block_name1);
if (bb->i_dom) {
- GetBlockName(bb->i_dom, block_name2);
+ GetBlockName(GetBasicBlock(bb->i_dom), block_name2);
fprintf(file, " cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
}
}
@@ -862,10 +871,9 @@
if (bb->first_mir_insn == NULL) {
DCHECK(bb->last_mir_insn == NULL);
bb->last_mir_insn = bb->first_mir_insn = mir;
- mir->prev = mir->next = NULL;
+ mir->next = NULL;
} else {
bb->last_mir_insn->next = mir;
- mir->prev = bb->last_mir_insn;
mir->next = NULL;
bb->last_mir_insn = mir;
}
@@ -876,25 +884,19 @@
if (bb->first_mir_insn == NULL) {
DCHECK(bb->last_mir_insn == NULL);
bb->last_mir_insn = bb->first_mir_insn = mir;
- mir->prev = mir->next = NULL;
+ mir->next = NULL;
} else {
- bb->first_mir_insn->prev = mir;
mir->next = bb->first_mir_insn;
- mir->prev = NULL;
bb->first_mir_insn = mir;
}
}
/* Insert a MIR instruction after the specified MIR */
void MIRGraph::InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir) {
- new_mir->prev = current_mir;
new_mir->next = current_mir->next;
current_mir->next = new_mir;
- if (new_mir->next) {
- /* Is not the last MIR in the block */
- new_mir->next->prev = new_mir;
- } else {
+ if (bb->last_mir_insn == current_mir) {
/* Is the last MIR in the block */
bb->last_mir_insn = new_mir;
}
@@ -924,8 +926,9 @@
opcode = insn.opcode;
} else if (opcode == kMirOpNop) {
str.append("[");
- insn.opcode = mir->meta.original_opcode;
- opcode = mir->meta.original_opcode;
+ // Recover original opcode.
+ insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
+ opcode = insn.opcode;
nop = true;
}
@@ -938,7 +941,7 @@
}
if (opcode == kMirOpPhi) {
- int* incoming = reinterpret_cast<int*>(insn.vB);
+ BasicBlockId* incoming = mir->meta.phi_incoming;
str.append(StringPrintf(" %s = (%s",
GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
@@ -1088,7 +1091,7 @@
}
const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
- // FIXME: use current code unit for inline support.
+ // TODO: for inlining support, use current code unit.
const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
return cu_->dex_file->GetShorty(method_id.proto_idx_);
}
@@ -1118,13 +1121,13 @@
bb->start_offset,
bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
bb->last_mir_insn ? "" : " empty");
- if (bb->taken) {
- LOG(INFO) << " Taken branch: block " << bb->taken->id
- << "(0x" << std::hex << bb->taken->start_offset << ")";
+ if (bb->taken != NullBasicBlockId) {
+ LOG(INFO) << " Taken branch: block " << bb->taken
+ << "(0x" << std::hex << GetBasicBlock(bb->taken)->start_offset << ")";
}
- if (bb->fall_through) {
- LOG(INFO) << " Fallthrough : block " << bb->fall_through->id
- << " (0x" << std::hex << bb->fall_through->start_offset << ")";
+ if (bb->fall_through != NullBasicBlockId) {
+ LOG(INFO) << " Fallthrough : block " << bb->fall_through
+ << " (0x" << std::hex << GetBasicBlock(bb->fall_through)->start_offset << ")";
}
}
}
@@ -1144,7 +1147,6 @@
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
- move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
info->num_arg_words = mir->ssa_rep->num_uses;
@@ -1168,10 +1170,10 @@
bb->block_type = block_type;
bb->id = block_id;
// TUNING: better estimate of the exit block predecessors?
- bb->predecessors = new (arena_) GrowableArray<BasicBlock*>(arena_,
+ bb->predecessors = new (arena_) GrowableArray<BasicBlockId>(arena_,
(block_type == kExitBlock) ? 2048 : 2,
kGrowableArrayPredecessors);
- bb->successor_block_list.block_list_type = kNotUsed;
+ bb->successor_block_list_type = kNotUsed;
block_id_map_.Put(block_id, block_id);
return bb;
}
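Several hunks above drop MIR's prev pointer, leaving each block with a singly linked instruction list; InsertMIRAfter now recognizes the tail by comparing against last_mir_insn instead of walking back through prev. A small sketch of insert-after on a singly linked list with a tail pointer (generic names, not the ART structs):

    #include <cassert>
    #include <cstddef>

    struct Node {
      int value;
      Node* next;
    };

    struct List {
      Node* head = nullptr;
      Node* tail = nullptr;
    };

    // Insert new_node immediately after cur; update the tail if cur was last.
    void InsertAfter(List* list, Node* cur, Node* new_node) {
      new_node->next = cur->next;
      cur->next = new_node;
      if (list->tail == cur) {  // cur was the last node in the list
        list->tail = new_node;
      }
    }

    int main() {
      Node a{1, nullptr};
      Node b{2, nullptr};
      List list;
      list.head = list.tail = &a;
      InsertAfter(&list, &a, &b);
      assert(a.next == &b && list.tail == &b);
      return 0;
    }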
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 5d01489..8dda7c4 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -183,6 +183,9 @@
#define BLOCK_NAME_LEN 80
+typedef uint16_t BasicBlockId;
+static const BasicBlockId NullBasicBlockId = 0;
+
/*
* In general, vreg/sreg describe Dalvik registers that originated with dx. However,
* it is useful to have compiler-generated temporary registers and have them treated
@@ -190,15 +193,15 @@
* name of compiler-introduced temporaries.
*/
struct CompilerTemp {
- int s_reg;
+ int32_t s_reg;
};
// When debug option enabled, records effectiveness of null and range check elimination.
struct Checkstats {
- int null_checks;
- int null_checks_eliminated;
- int range_checks;
- int range_checks_eliminated;
+ int32_t null_checks;
+ int32_t null_checks_eliminated;
+ int32_t range_checks;
+ int32_t range_checks_eliminated;
};
// Dataflow attributes of a basic block.
@@ -207,7 +210,7 @@
ArenaBitVector* def_v;
ArenaBitVector* live_in_v;
ArenaBitVector* phi_v;
- int* vreg_to_ssa_map;
+ int32_t* vreg_to_ssa_map;
ArenaBitVector* ending_null_check_v;
};
@@ -220,11 +223,11 @@
* we may want to revisit in the future.
*/
struct SSARepresentation {
- int num_uses;
- int* uses;
+ int16_t num_uses;
+ int16_t num_defs;
+ int32_t* uses;
bool* fp_use;
- int num_defs;
- int* defs;
+ int32_t* defs;
bool* fp_def;
};
@@ -233,51 +236,53 @@
* wrapper around a Dalvik byte code.
*/
struct MIR {
+ /*
+ * TODO: remove embedded DecodedInstruction to save space, keeping only opcode. Recover
+ * additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
+ * need to carry aux data pointer.
+ */
DecodedInstruction dalvikInsn;
- uint32_t width; // NOTE: only need 16 bits for width.
- unsigned int offset;
- int m_unit_index; // From which method was this MIR included
- MIR* prev;
+ uint16_t width; // Note: width can include switch table or fill array data.
+ NarrowDexOffset offset; // Offset of the instruction in code units.
+ uint16_t optimization_flags;
+ int16_t m_unit_index; // From which method was this MIR included
MIR* next;
SSARepresentation* ssa_rep;
- int optimization_flags;
union {
+ // Incoming edges for phi node.
+ BasicBlockId* phi_incoming;
// Establish link between two halves of throwing instructions.
MIR* throw_insn;
- // Saved opcode for NOP'd MIRs
- Instruction::Code original_opcode;
} meta;
};
struct SuccessorBlockInfo;
struct BasicBlock {
- int id;
- int dfs_id;
- bool visited;
- bool hidden;
- bool catch_entry;
- bool explicit_throw;
- bool conditional_branch;
- bool terminated_by_return; // Block ends with a Dalvik return opcode.
- bool dominates_return; // Is a member of return extended basic block.
- uint16_t start_offset;
+ BasicBlockId id;
+ BasicBlockId dfs_id;
+ NarrowDexOffset start_offset; // Offset in code units.
+ BasicBlockId fall_through;
+ BasicBlockId taken;
+ BasicBlockId i_dom; // Immediate dominator.
uint16_t nesting_depth;
- BBType block_type;
+ BBType block_type:4;
+ BlockListType successor_block_list_type:4;
+ bool visited:1;
+ bool hidden:1;
+ bool catch_entry:1;
+ bool explicit_throw:1;
+ bool conditional_branch:1;
+ bool terminated_by_return:1; // Block ends with a Dalvik return opcode.
+ bool dominates_return:1; // Is a member of return extended basic block.
MIR* first_mir_insn;
MIR* last_mir_insn;
- BasicBlock* fall_through;
- BasicBlock* taken;
- BasicBlock* i_dom; // Immediate dominator.
BasicBlockDataFlow* data_flow_info;
- GrowableArray<BasicBlock*>* predecessors;
ArenaBitVector* dominators;
ArenaBitVector* i_dominated; // Set nodes being immediately dominated.
ArenaBitVector* dom_frontier; // Dominance frontier.
- struct { // For one-to-many successors like.
- BlockListType block_list_type; // switch and exception handling.
- GrowableArray<SuccessorBlockInfo*>* blocks;
- } successor_block_list;
+ GrowableArray<BasicBlockId>* predecessors;
+ GrowableArray<SuccessorBlockInfo*>* successor_blocks;
};
/*
@@ -285,9 +290,8 @@
* "SuccessorBlockInfo". For catch blocks, key is type index for the exception. For switch
* blocks, key is the case value.
*/
-// TODO: make class with placement new.
struct SuccessorBlockInfo {
- BasicBlock* block;
+ BasicBlockId block;
int key;
};
@@ -296,6 +300,15 @@
* the type of an SSA name (and, can also be used by code generators to record where the
* value is located (i.e. - physical register, frame, spill, etc.). For each SSA name (SReg)
* there is a RegLocation.
+ * A note on SSA names:
+ * o SSA names for Dalvik vRegs v0..vN will be assigned 0..N. These represent the "vN_0"
+ * names. Negative SSA names represent special values not present in the Dalvik byte code.
+ * For example, SSA name -1 represents an invalid SSA name, and SSA name -2 represents the
+ * Method pointer. SSA names < -2 are reserved for future use.
+ * o The vN_0 names for non-argument Dalvik vRegs should in practice never be used (as they would
+ * represent the read of an undefined local variable). The first definition of the
+ * underlying Dalvik vReg will result in a vN_1 name.
+ *
* FIXME: The orig_sreg field was added as a workaround for llvm bitcode generation. With
* the latest restructuring, we should be able to remove it and rely on s_reg_low throughout.
*/
@@ -311,9 +324,9 @@
unsigned home:1; // Does this represent the home location?
uint8_t low_reg; // First physical register.
uint8_t high_reg; // 2nd physical register (if wide).
- int32_t s_reg_low; // SSA name for low Dalvik word.
- int32_t orig_sreg; // TODO: remove after Bitcode gen complete
- // and consolodate usage w/ s_reg_low.
+ int16_t s_reg_low; // SSA name for low Dalvik word.
+ int16_t orig_sreg; // TODO: remove after Bitcode gen complete
+ // and consolidate usage w/ s_reg_low.
};
/*
@@ -334,7 +347,7 @@
RegLocation target; // Target of following move_result.
bool skip_this;
bool is_range;
- int offset; // Dalvik offset.
+ DexOffset offset; // Offset in code units.
};
@@ -361,7 +374,7 @@
uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
/* Find existing block */
- BasicBlock* FindBlock(unsigned int code_offset) {
+ BasicBlock* FindBlock(DexOffset code_offset) {
return FindBlock(code_offset, false, false, NULL);
}
@@ -394,7 +407,7 @@
}
BasicBlock* GetBasicBlock(int block_id) const {
- return block_list_.Get(block_id);
+ return (block_id == NullBasicBlockId) ? NULL : block_list_.Get(block_id);
}
size_t GetBasicBlockListCount() const {
@@ -405,15 +418,15 @@
return &block_list_;
}
- GrowableArray<int>* GetDfsOrder() {
+ GrowableArray<BasicBlockId>* GetDfsOrder() {
return dfs_order_;
}
- GrowableArray<int>* GetDfsPostOrder() {
+ GrowableArray<BasicBlockId>* GetDfsPostOrder() {
return dfs_post_order_;
}
- GrowableArray<int>* GetDomPostOrder() {
+ GrowableArray<BasicBlockId>* GetDomPostOrder() {
return dom_post_order_traversal_;
}
@@ -477,6 +490,12 @@
}
void SetNumSSARegs(int new_num) {
+ /*
+ * TODO: It's theoretically possible to exceed 32767, though any cases which did
+ * would be filtered out with current settings. When orig_sreg field is removed
+ * from RegLocation, expand s_reg_low to handle all possible cases and remove DCHECK().
+ */
+ DCHECK_EQ(new_num, static_cast<int16_t>(new_num));
num_ssa_regs_ = new_num;
}
@@ -561,15 +580,16 @@
return special_case_;
}
- bool IsBackedge(BasicBlock* branch_bb, BasicBlock* target_bb) {
- return ((target_bb != NULL) && (target_bb->start_offset <= branch_bb->start_offset));
+ bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+ return ((target_bb_id != NullBasicBlockId) &&
+ (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
}
bool IsBackwardsBranch(BasicBlock* branch_bb) {
return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
}
- void CountBranch(int target_offset) {
+ void CountBranch(DexOffset target_offset) {
if (target_offset <= current_offset_) {
backward_branches_++;
} else {
@@ -640,6 +660,9 @@
void DumpMIRGraph();
CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
BasicBlock* NewMemBB(BBType block_type, int block_id);
+ MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
+ BasicBlock* NextDominatedBlock(BasicBlock* bb);
+ bool LayoutBlocks(BasicBlock* bb);
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
@@ -668,15 +691,16 @@
bool InvokeUsesMethodStar(MIR* mir);
int ParseInsn(const uint16_t* code_ptr, DecodedInstruction* decoded_instruction);
bool ContentIsInsn(const uint16_t* code_ptr);
- BasicBlock* SplitBlock(unsigned int code_offset, BasicBlock* orig_block,
+ BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(unsigned int code_offset, bool split, bool create,
+ BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
BasicBlock** immed_pred_block_p);
void ProcessTryCatchBlocks();
- BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+ BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags, const uint16_t* code_ptr, const uint16_t* code_end);
- void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, int cur_offset, int width, int flags);
- BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, int cur_offset, int width,
+ void ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
+ int flags);
+ BasicBlock* ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
int flags, ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
const uint16_t* code_end);
int AddNewSReg(int v_reg);
@@ -732,9 +756,9 @@
GrowableArray<uint32_t> use_counts_; // Weighted by nesting depth
GrowableArray<uint32_t> raw_use_counts_; // Not weighted
unsigned int num_reachable_blocks_;
- GrowableArray<int>* dfs_order_;
- GrowableArray<int>* dfs_post_order_;
- GrowableArray<int>* dom_post_order_traversal_;
+ GrowableArray<BasicBlockId>* dfs_order_;
+ GrowableArray<BasicBlockId>* dfs_post_order_;
+ GrowableArray<BasicBlockId>* dom_post_order_traversal_;
int* i_dom_list_;
ArenaBitVector** def_block_matrix_; // num_dalvik_register x num_blocks.
ArenaBitVector* temp_block_v_;
@@ -752,11 +776,11 @@
typedef std::pair<int, int> MIRLocation; // Insert point, (m_unit_ index, offset)
std::vector<MIRLocation> method_stack_; // Include stack
int current_method_;
- int current_offset_; // Dex offset in code units
+ DexOffset current_offset_; // Offset in code units
int def_count_; // Used to estimate size of ssa name storage.
int* opcode_count_; // Dex opcode coverage stats.
int num_ssa_regs_; // Number of names following SSA transformation.
- std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces".
+ std::vector<BasicBlockId> extended_basic_blocks_; // Heads of block "traces".
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
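The BasicBlock changes above replace pointer edges (taken, fall_through, i_dom, predecessors, successor blocks) with 16-bit BasicBlockId indices into the graph's block list, reserving id 0 as NullBasicBlockId via the hidden null block created earlier in mir_graph.cc. A compact sketch of that id-based linkage (simplified types, not the real MIRGraph):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    typedef uint16_t BasicBlockId;
    static const BasicBlockId kNullBlockId = 0;  // slot 0 holds a hidden placeholder

    struct Block {
      BasicBlockId id;
      BasicBlockId taken;
      BasicBlockId fall_through;
    };

    class Graph {
     public:
      Graph() { blocks_.push_back(Block{kNullBlockId, kNullBlockId, kNullBlockId}); }
      BasicBlockId NewBlock() {
        BasicBlockId id = static_cast<BasicBlockId>(blocks_.size());
        blocks_.push_back(Block{id, kNullBlockId, kNullBlockId});
        return id;
      }
      // Mirrors MIRGraph::GetBasicBlock(): the null id resolves to nullptr.
      Block* Get(BasicBlockId id) { return (id == kNullBlockId) ? nullptr : &blocks_[id]; }
     private:
      std::vector<Block> blocks_;
    };

    int main() {
      Graph g;
      BasicBlockId entry = g.NewBlock();
      BasicBlockId body = g.NewBlock();
      g.Get(entry)->fall_through = body;              // edges store ids, not pointers
      assert(g.Get(g.Get(entry)->fall_through) == g.Get(body));
      assert(g.Get(g.Get(entry)->taken) == nullptr);  // unset edge == null id
      return 0;
    }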
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 05e428e..3cd158f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -103,12 +103,12 @@
}
/* Advance to next strictly dominated MIR node in an extended basic block */
-static MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
+MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
if (mir != NULL) {
mir = mir->next;
if (mir == NULL) {
- bb = bb->fall_through;
+ bb = GetBasicBlock(bb->fall_through);
if ((bb == NULL) || Predecessors(bb) != 1) {
mir = NULL;
} else {
@@ -147,19 +147,21 @@
return mir;
}
-static BasicBlock* NextDominatedBlock(BasicBlock* bb) {
+BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
return NULL;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
- if (((bb->taken != NULL) && (bb->fall_through == NULL)) &&
- ((bb->taken->block_type == kDalvikByteCode) || (bb->taken->block_type == kExitBlock))) {
+ BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+ BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
+ if (((bb_taken != NULL) && (bb_fall_through == NULL)) &&
+ ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
- bb = bb->taken;
+ bb = bb_taken;
} else {
// Follow simple fallthrough
- bb = (bb->taken != NULL) ? NULL : bb->fall_through;
+ bb = (bb_taken != NULL) ? NULL : bb_fall_through;
}
if (bb == NULL || (Predecessors(bb) != 1)) {
return NULL;
@@ -311,11 +313,13 @@
case Instruction::IF_GTZ:
case Instruction::IF_LEZ:
// If we've got a backwards branch to return, no need to suspend check.
- if ((IsBackedge(bb, bb->taken) && bb->taken->dominates_return) ||
- (IsBackedge(bb, bb->fall_through) && bb->fall_through->dominates_return)) {
+ if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
+ (IsBackedge(bb, bb->fall_through) &&
+ GetBasicBlock(bb->fall_through)->dominates_return)) {
mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
if (cu_->verbose) {
- LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
+ LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
+ << mir->offset;
}
}
break;
@@ -328,15 +332,15 @@
if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) &&
((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
(mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
- BasicBlock* ft = bb->fall_through;
+ BasicBlock* ft = GetBasicBlock(bb->fall_through);
DCHECK(ft != NULL);
- BasicBlock* ft_ft = ft->fall_through;
- BasicBlock* ft_tk = ft->taken;
+ BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
+ BasicBlock* ft_tk = GetBasicBlock(ft->taken);
- BasicBlock* tk = bb->taken;
+ BasicBlock* tk = GetBasicBlock(bb->taken);
DCHECK(tk != NULL);
- BasicBlock* tk_ft = tk->fall_through;
- BasicBlock* tk_tk = tk->taken;
+ BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
+ BasicBlock* tk_tk = GetBasicBlock(tk->taken);
/*
* In the select pattern, the taken edge goes to a block that unconditionally
@@ -434,7 +438,7 @@
int dead_def = if_false->ssa_rep->defs[0];
int live_def = if_true->ssa_rep->defs[0];
mir->ssa_rep->defs[0] = live_def;
- int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+ BasicBlockId* incoming = phi->meta.phi_incoming;
for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
if (phi->ssa_rep->uses[i] == live_def) {
incoming[i] = bb->id;
@@ -449,7 +453,7 @@
}
}
phi->ssa_rep->num_uses--;
- bb->taken = NULL;
+ bb->taken = NullBasicBlockId;
tk->block_type = kDead;
for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
@@ -500,7 +504,7 @@
}
/* Try to make common case the fallthrough path */
-static bool LayoutBlocks(struct BasicBlock* bb) {
+bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
// TODO: For now, just looking for direct throws. Consider generalizing for profile feedback
if (!bb->explicit_throw) {
return false;
@@ -511,13 +515,13 @@
if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
break;
}
- BasicBlock* prev = walker->predecessors->Get(0);
+ BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
if (prev->conditional_branch) {
- if (prev->fall_through == walker) {
+ if (GetBasicBlock(prev->fall_through) == walker) {
// Already done - return
break;
}
- DCHECK_EQ(walker, prev->taken);
+ DCHECK_EQ(walker, GetBasicBlock(prev->taken));
// Got one. Flip it and exit
Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
switch (opcode) {
@@ -536,7 +540,7 @@
default: LOG(FATAL) << "Unexpected opcode " << opcode;
}
prev->last_mir_insn->dalvikInsn.opcode = opcode;
- BasicBlock* t_bb = prev->taken;
+ BasicBlockId t_bb = prev->taken;
prev->taken = prev->fall_through;
prev->fall_through = t_bb;
break;
@@ -556,8 +560,9 @@
|| (bb->block_type == kExceptionHandling)
|| (bb->block_type == kExitBlock)
|| (bb->block_type == kDead)
- || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
- || (bb->successor_block_list.block_list_type != kNotUsed)
+ || (bb->taken == NullBasicBlockId)
+ || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
+ || (bb->successor_block_list_type != kNotUsed)
|| (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
break;
}
@@ -578,19 +583,18 @@
break;
}
// OK - got one. Combine
- BasicBlock* bb_next = bb->fall_through;
+ BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
DCHECK(!bb_next->catch_entry);
DCHECK_EQ(Predecessors(bb_next), 1U);
- MIR* t_mir = bb->last_mir_insn->prev;
// Overwrite the kOpCheck insn with the paired opcode
DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
*bb->last_mir_insn = *throw_insn;
- bb->last_mir_insn->prev = t_mir;
// Use the successor info from the next block
- bb->successor_block_list = bb_next->successor_block_list;
+ bb->successor_block_list_type = bb_next->successor_block_list_type;
+ bb->successor_blocks = bb_next->successor_blocks;
// Use the ending block linkage from the next block
bb->fall_through = bb_next->fall_through;
- bb->taken->block_type = kDead; // Kill the unused exception block
+ GetBasicBlock(bb->taken)->block_type = kDead; // Kill the unused exception block
bb->taken = bb_next->taken;
// Include the rest of the instructions
bb->last_mir_insn = bb_next->last_mir_insn;
@@ -631,20 +635,20 @@
temp_ssa_register_v_->SetBit(this_reg);
}
} else if (bb->predecessors->Size() == 1) {
- BasicBlock* pred_bb = bb->predecessors->Get(0);
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
if (pred_bb->block_type == kDalvikByteCode) {
// Check to see if predecessor had an explicit null-check.
MIR* last_insn = pred_bb->last_mir_insn;
Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
if (last_opcode == Instruction::IF_EQZ) {
- if (pred_bb->fall_through == bb) {
+ if (pred_bb->fall_through == bb->id) {
// The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
// it can't be null.
temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
}
} else if (last_opcode == Instruction::IF_NEZ) {
- if (pred_bb->taken == bb) {
+ if (pred_bb->taken == bb->id) {
// The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
// null.
temp_ssa_register_v_->SetBit(last_insn->ssa_rep->uses[0]);
@@ -653,12 +657,12 @@
}
} else {
// Starting state is intersection of all incoming arcs
- GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
- BasicBlock* pred_bb = iter.Next();
+ GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
DCHECK(pred_bb != NULL);
temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
while (true) {
- pred_bb = iter.Next();
+ pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) break;
if ((pred_bb->data_flow_info == NULL) ||
(pred_bb->data_flow_info->ending_null_check_v == NULL)) {
@@ -691,9 +695,9 @@
} else {
if (next_mir) {
LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
- } else if (bb->fall_through) {
+ } else if (bb->fall_through != NullBasicBlockId) {
// Look in next basic block
- struct BasicBlock* next_bb = bb->fall_through;
+ struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
tmir =tmir->next) {
if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
@@ -834,7 +838,7 @@
}
// Must be head of extended basic block.
BasicBlock* start_bb = bb;
- extended_basic_blocks_.push_back(bb);
+ extended_basic_blocks_.push_back(bb->id);
bool terminated_by_return = false;
// Visit blocks strictly dominated by this head.
while (bb != NULL) {
@@ -864,7 +868,7 @@
}
// Perform extended basic block optimizations.
for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
- BasicBlockOpt(extended_basic_blocks_[i]);
+ BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
}
}
if (cu_->enable_debug & (1 << kDebugDumpCFG)) {
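
The core of the MIRGraph changes above is that successor and predecessor links (taken, fall_through, predecessors) now hold compact BasicBlockId values instead of BasicBlock* pointers, with NullBasicBlockId as the "no block" sentinel and MIRGraph::GetBasicBlock() doing the id-to-pointer lookup. The diff never shows those declarations; a minimal sketch of the pattern, with assumed names and a plain std::vector standing in for the arena-backed block list:

  #include <cstdint>
  #include <vector>

  typedef uint16_t BasicBlockId;                   // Assumed width.
  static const BasicBlockId NullBasicBlockId = 0;  // Assumed sentinel value.

  struct BasicBlock {
    BasicBlockId id;
    BasicBlockId taken;          // Successors stored as ids, not pointers.
    BasicBlockId fall_through;
  };

  class Graph {
   public:
    Graph() : block_list_(1, nullptr) {}           // Slot 0 backs the null id.

    BasicBlock* GetBasicBlock(BasicBlockId id) const {
      return (id == NullBasicBlockId) ? nullptr : block_list_[id];
    }

   private:
    std::vector<BasicBlock*> block_list_;
  };

  // Usage mirroring the rewritten call sites:
  //   BasicBlock* bb_taken = graph.GetBasicBlock(bb->taken);
  //   if (bb->fall_through != NullBasicBlockId) { ... }
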
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index df10f7e..963cbeb 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -132,7 +132,7 @@
::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
::llvm::SwitchInst* sw =
- irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+ irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
payload->case_count);
for (uint16_t i = 0; i < payload->case_count; ++i) {
@@ -143,8 +143,8 @@
::llvm::MDNode* switch_node =
::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
sw->setMetadata("SwitchTable", switch_node);
- bb->taken = NULL;
- bb->fall_through = NULL;
+ bb->taken = NullBasicBlockId;
+ bb->fall_through = NullBasicBlockId;
}
void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
@@ -159,7 +159,7 @@
::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
::llvm::SwitchInst* sw =
- irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through->id),
+ irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
payload->case_count);
for (size_t i = 0; i < payload->case_count; ++i) {
@@ -170,8 +170,8 @@
::llvm::MDNode* switch_node =
::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
sw->setMetadata("SwitchTable", switch_node);
- bb->taken = NULL;
- bb->fall_through = NULL;
+ bb->taken = NullBasicBlockId;
+ bb->fall_through = NullBasicBlockId;
}
void MirConverter::ConvertSget(int32_t field_index,
@@ -311,22 +311,22 @@
void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) {
- if (bb->taken->start_offset <= mir->offset) {
+ if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
EmitSuspendCheck();
}
::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
cond_value->setName(StringPrintf("t%d", temp_name_++));
- irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
- GetLLVMBlock(bb->fall_through->id));
+ irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
+ GetLLVMBlock(bb->fall_through));
// Don't redo the fallthrough branch in the BB driver
- bb->fall_through = NULL;
+ bb->fall_through = NullBasicBlockId;
}
void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
MIR* mir, ConditionCode cc, RegLocation rl_src1) {
- if (bb->taken->start_offset <= mir->offset) {
+ if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
EmitSuspendCheck();
}
::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
@@ -337,10 +337,10 @@
src2 = irb_->getInt32(0);
}
::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
- irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken->id),
- GetLLVMBlock(bb->fall_through->id));
+ irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
+ GetLLVMBlock(bb->fall_through));
// Don't redo the fallthrough branch in the BB driver
- bb->fall_through = NULL;
+ bb->fall_through = NullBasicBlockId;
}
::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
@@ -941,10 +941,10 @@
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- if (bb->taken->start_offset <= bb->start_offset) {
+ if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= bb->start_offset) {
EmitSuspendCheck();
}
- irb_->CreateBr(GetLLVMBlock(bb->taken->id));
+ irb_->CreateBr(GetLLVMBlock(bb->taken));
}
break;
@@ -1190,11 +1190,11 @@
* If it might rethrow, force termination
* of the following block.
*/
- if (bb->fall_through == NULL) {
+ if (bb->fall_through == NullBasicBlockId) {
irb_->CreateUnreachable();
} else {
- bb->fall_through->fall_through = NULL;
- bb->fall_through->taken = NULL;
+ mir_graph_->GetBasicBlock(bb->fall_through)->fall_through = NullBasicBlockId;
+ mir_graph_->GetBasicBlock(bb->fall_through)->taken = NullBasicBlockId;
}
break;
@@ -1552,7 +1552,7 @@
if (rl_dest.high_word) {
continue; // No Phi node - handled via low word
}
- int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
+ BasicBlockId* incoming = mir->meta.phi_incoming;
::llvm::Type* phi_type =
LlvmTypeFromLocRec(rl_dest);
::llvm::PHINode* phi = irb_->CreatePHI(phi_type, mir->ssa_rep->num_uses);
@@ -1597,8 +1597,8 @@
break;
}
case kMirOpNop:
- if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
- (bb->fall_through == NULL)) {
+ if ((mir == bb->last_mir_insn) && (bb->taken == NullBasicBlockId) &&
+ (bb->fall_through == NullBasicBlockId)) {
irb_->CreateUnreachable();
}
break;
@@ -1718,25 +1718,23 @@
SSARepresentation* ssa_rep = work_half->ssa_rep;
work_half->ssa_rep = mir->ssa_rep;
mir->ssa_rep = ssa_rep;
- work_half->meta.original_opcode = work_half->dalvikInsn.opcode;
work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- if (bb->successor_block_list.block_list_type == kCatch) {
+ if (bb->successor_block_list_type == kCatch) {
::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
art::llvm::IntrinsicHelper::CatchTargets);
::llvm::Value* switch_key =
irb_->CreateCall(intr, irb_->getInt32(mir->offset));
- GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
+ GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_blocks);
// New basic block to use for work half
::llvm::BasicBlock* work_bb =
::llvm::BasicBlock::Create(*context_, "", func_);
::llvm::SwitchInst* sw =
- irb_->CreateSwitch(switch_key, work_bb,
- bb->successor_block_list.blocks->Size());
+ irb_->CreateSwitch(switch_key, work_bb, bb->successor_blocks->Size());
while (true) {
SuccessorBlockInfo *successor_block_info = iter.Next();
if (successor_block_info == NULL) break;
::llvm::BasicBlock *target =
- GetLLVMBlock(successor_block_info->block->id);
+ GetLLVMBlock(successor_block_info->block);
int type_index = successor_block_info->key;
sw->addCase(irb_->getInt32(type_index), target);
}
@@ -1761,9 +1759,9 @@
}
if (bb->block_type == kEntryBlock) {
- entry_target_bb_ = GetLLVMBlock(bb->fall_through->id);
- } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
- irb_->CreateBr(GetLLVMBlock(bb->fall_through->id));
+ entry_target_bb_ = GetLLVMBlock(bb->fall_through);
+ } else if ((bb->fall_through != NullBasicBlockId) && !bb->terminated_by_return) {
+ irb_->CreateBr(GetLLVMBlock(bb->fall_through));
}
return false;
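
A related cleanup visible in the hunks above: Phi nodes no longer smuggle a pointer to their incoming-block array through the 32-bit dalvikInsn.vB operand; it now lives in a typed meta.phi_incoming field. A hedged sketch of what that implies for the MIR layout (field names taken from the diff, everything else assumed):

  #include <cstdint>

  typedef uint16_t BasicBlockId;   // As in the earlier sketch.

  struct MIR {
    struct {
      uint32_t vB;                 // Plain Dalvik operand; no longer overloaded.
    } dalvikInsn;
    union {
      // For kMirOpPhi: predecessor block id feeding each use, parallel to
      // ssa_rep->uses.
      BasicBlockId* phi_incoming;
      // ... other per-opcode metadata would live here.
    } meta;
  };

  // Before: int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
  // After:  BasicBlockId* incoming = phi->meta.phi_incoming;
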
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index d184673..2ff7f1c 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -296,6 +296,8 @@
kThumbOrr, // orr [0100001100] rm[5..3] rd[2..0].
kThumbPop, // pop [1011110] r[8..8] rl[7..0].
kThumbPush, // push [1011010] r[8..8] rl[7..0].
+ kThumbRev, // rev [1011101000] rm[5..3] rd[2..0].
+ kThumbRevsh, // revsh [1011101011] rm[5..3] rd[2..0].
kThumbRorRR, // ror [0100000111] rs[5..3] rd[2..0].
kThumbSbc, // sbc [0100000110] rm[5..3] rd[2..0].
kThumbStmia, // stmia [11000] rn[10..8] reglist [7.. 0].
@@ -399,6 +401,8 @@
kThumb2AdcRRI8, // adc [111100010101] rn[19..16] [0] imm3 rd[11..8] imm8.
kThumb2SubRRI8, // sub [111100011011] rn[19..16] [0] imm3 rd[11..8] imm8.
kThumb2SbcRRI8, // sbc [111100010111] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2RevRR, // rev [111110101001] rm[19..16] [1111] rd[11..8] 1000 rm[3..0].
+ kThumb2RevshRR, // revsh [111110101001] rm[19..16] [1111] rd[11..8] 1011 rm[3..0].
kThumb2It, // it [10111111] firstcond[7-4] mask[3-0].
kThumb2Fmstat, // fmstat [11101110111100011111101000010000].
kThumb2Vcmpd, // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3c646c4..e8c188c 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -327,6 +327,16 @@
kFmtUnused, -1, -1,
IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
| IS_STORE, "push", "<!0R>", 2, kFixupNone),
+ ENCODING_MAP(kThumbRev, 0xba00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1,
+ "rev", "!0C, !1C", 2, kFixupNone),
+ ENCODING_MAP(kThumbRevsh, 0xbac0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1,
+ "rev", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbRorRR, 0x41c0,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
@@ -768,6 +778,16 @@
kFmtUnused, -1, -1,
IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
"sbcs", "!0C, !1C, #!2m", 4, kFixupNone),
+ ENCODING_MAP(kThumb2RevRR, 0xfa90f080,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12, // Binary, but rm is stored twice.
+ "rev", "!0C, !1C", 4, kFixupNone),
+ ENCODING_MAP(kThumb2RevshRR, 0xfa90f0b0,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12, // Binary, but rm is stored twice.
+ "revsh", "!0C, !1C", 4, kFixupNone),
ENCODING_MAP(kThumb2It, 0xbf00,
kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
@@ -1031,8 +1051,7 @@
} else if (LIKELY(!lir->flags.is_nop)) {
const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
uint32_t bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
@@ -1088,7 +1107,7 @@
case kFmtDfp: {
DCHECK(ARM_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+ uint32_t reg_name = (operand & ARM_FP_REG_MASK) >> 1;
/* Snag the 1-bit slice and position it */
value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
@@ -1154,10 +1173,11 @@
void ArmMir2Lir::AssembleLIR() {
LIR* lir;
LIR* prev_lir;
+ cu_->NewTimingSplit("Assemble");
int assembler_retries = 0;
- int starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
+ CodeOffset starting_offset = EncodeRange(first_lir_insn_, last_lir_insn_, 0);
data_offset_ = (starting_offset + 0x3) & ~0x3;
- int offset_adjustment;
+ int32_t offset_adjustment;
AssignDataOffsets();
/*
@@ -1200,10 +1220,10 @@
* we revert to a multiple-instruction materialization sequence.
*/
LIR *lir_target = lir->target;
- uintptr_t pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir_target->offset +
+ CodeOffset pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir_target->offset +
((lir_target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (res != kSuccess) {
/*
* In this case, we're just estimating and will do it again for real. Ensure offset
@@ -1281,10 +1301,10 @@
}
case kFixupCBxZ: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 126 || delta < 0) {
/*
* Convert to cmp rx,#0 / b[eq/ne] tgt pair
@@ -1351,10 +1371,10 @@
}
case kFixupCondBranch: {
LIR *target_lir = lir->target;
- int delta = 0;
+ int32_t delta = 0;
DCHECK(target_lir);
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
delta = target - pc;
if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
@@ -1370,10 +1390,10 @@
}
case kFixupT2Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
lir->operands[0] = delta >> 1;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && lir->operands[0] == 0) {
// Useless branch
@@ -1387,10 +1407,10 @@
}
case kFixupT1Branch: {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset +
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int delta = target - pc;
+ int32_t delta = target - pc;
if (delta > 2046 || delta < -2048) {
// Convert to Thumb2BCond w/ kArmCondAl
offset_adjustment -= lir->flags.size;
@@ -1416,14 +1436,14 @@
case kFixupBlx1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
/* cur_pc is Thumb */
- uintptr_t cur_pc = (lir->offset + 4) & ~3;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = (lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[1];
/* Match bit[1] in target with base */
if (cur_pc & 0x2) {
target |= 0x2;
}
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1433,10 +1453,10 @@
case kFixupBl1: {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
/* Both cur_pc and target are Thumb */
- uintptr_t cur_pc = lir->offset + 4;
- uintptr_t target = lir->operands[1];
+ CodeOffset cur_pc = lir->offset + 4;
+ CodeOffset target = lir->operands[1];
- int delta = target - cur_pc;
+ int32_t delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
@@ -1444,20 +1464,19 @@
break;
}
case kFixupAdr: {
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[2]));
LIR* target = lir->target;
- int target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
- int disp = target_disp - ((lir->offset + 4) & ~3);
+ int32_t disp = target_disp - ((lir->offset + 4) & ~3);
if (disp < 4096) {
lir->operands[1] = disp;
} else {
// convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
// TUNING: if this case fires often, it can be improved. Not expected to be common.
LIR *new_mov16L =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16LST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16LST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16L->flags.size = EncodingMap[new_mov16L->opcode].size;
new_mov16L->flags.fixup = kFixupMovImmLST;
new_mov16L->offset = lir->offset;
@@ -1467,11 +1486,9 @@
offset_adjustment += new_mov16L->flags.size;
InsertFixupBefore(prev_lir, lir, new_mov16L);
prev_lir = new_mov16L; // Now we've got a new prev.
-
LIR *new_mov16H =
- RawLIR(lir->dalvik_offset, kThumb2MovImm16HST,
- lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ RawLIR(lir->dalvik_offset, kThumb2MovImm16HST, lir->operands[0], 0,
+ WrapPointer(lir), WrapPointer(tab_rec), 0, lir->target);
new_mov16H->flags.size = EncodingMap[new_mov16H->opcode].size;
new_mov16H->flags.fixup = kFixupMovImmHST;
new_mov16H->offset = lir->offset;
@@ -1499,27 +1516,27 @@
}
case kFixupMovImmLST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
break;
}
case kFixupMovImmHST: {
// operands[1] should hold disp, [2] has add, [3] has tab_rec
- LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ LIR *addPCInst = reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2]));
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
// If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ int32_t target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] =
((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
break;
}
case kFixupAlign4: {
- int required_size = lir->offset & 0x2;
+ int32_t required_size = lir->offset & 0x2;
if (lir->flags.size != required_size) {
offset_adjustment += required_size - lir->flags.size;
lir->flags.size = required_size;
@@ -1578,6 +1595,7 @@
data_offset_ = (code_buffer_.size() + 0x3) & ~0x3;
+ cu_->NewTimingSplit("LiteralData");
// Install literals
InstallLiteralPools();
@@ -1588,8 +1606,10 @@
InstallFillArrayData();
// Create the mapping table and native offset to reference map.
+ cu_->NewTimingSplit("PcMappingTable");
CreateMappingTables();
+ cu_->NewTimingSplit("GcMap");
CreateNativeGcMap();
}
@@ -1647,7 +1667,7 @@
void ArmMir2Lir::AssignDataOffsets() {
/* Set up offsets for literals */
- int offset = data_offset_;
+ CodeOffset offset = data_offset_;
offset = AssignLiteralOffset(offset);
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 401da2a..51aca85 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -92,7 +92,7 @@
}
/* Find the next MIR, which may be in a following basic block */
-// TODO: should this be a utility in mir_graph?
+// TODO: make this a utility in mir_graph.
MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
MIR* orig_mir = mir;
@@ -103,7 +103,7 @@
if (mir != NULL) {
return mir;
} else {
- bb = bb->fall_through;
+ bb = mir_graph_->GetBasicBlock(bb->fall_through);
*p_bb = bb;
if (bb) {
mir = bb->first_mir_insn;
@@ -128,7 +128,7 @@
MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -153,7 +153,7 @@
MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
OpSize size, bool long_or_double, bool is_object) {
- int field_offset;
+ int32_t field_offset;
bool is_volatile;
uint32_t field_idx = mir->dalvikInsn.vC;
bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
@@ -320,9 +320,9 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -338,7 +338,7 @@
r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
// Set up r_idx
int r_idx = AllocTemp();
LoadConstant(r_idx, size);
@@ -368,7 +368,7 @@
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
+ uint32_t size = table[1];
tab_rec->targets =
static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
@@ -377,7 +377,7 @@
rl_src = LoadValue(rl_src, kCoreReg);
int table_base = AllocTemp();
// Materialize a pointer to the switch table
- NewLIR3(kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
int low_key = s4FromSwitchData(&table[2]);
int keyReg;
// Remove the bias, if necessary
@@ -433,7 +433,7 @@
LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
rARM_LR);
// Materialize a pointer to the fill data image
- NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index aa5782b..0a3bfc1 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -74,7 +74,6 @@
uint32_t EncodeRange(LIR* head_lir, LIR* tail_lir, uint32_t starting_offset);
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
void EncodeLIR(LIR* lir);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
@@ -120,7 +119,7 @@
void GenDivZeroCheck(int reg_lo, int reg_hi);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
@@ -132,8 +131,8 @@
int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Required for target - single operation generators.
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 08d6778..480e021 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -176,7 +176,7 @@
void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
- LIR* target = &block_label_list_[bb->taken->id];
+ LIR* target = &block_label_list_[bb->taken];
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index a89d161..c3140a5 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -122,8 +122,8 @@
int32_t val_hi = High32Bits(val);
DCHECK_GE(ModifiedImmediate(val_lo), 0);
DCHECK_GE(ModifiedImmediate(val_hi), 0);
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
int32_t low_reg = rl_src1.low_reg;
int32_t high_reg = rl_src1.high_reg;
@@ -178,23 +178,6 @@
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
- // Temporary debugging code
- int dest_sreg = mir->ssa_rep->defs[0];
- if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
- LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
- LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
- LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
- if (mir->ssa_rep->num_uses == 1) {
- LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
- } else {
- LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
- << mir->ssa_rep->uses[2];
- }
- CHECK(false) << "Invalid target sreg on Select.";
- }
- // End temporary debugging code
RegLocation rl_dest = mir_graph_->GetDest(mir);
rl_src = LoadValue(rl_src, kCoreReg);
if (mir->ssa_rep->num_uses == 1) {
@@ -270,8 +253,8 @@
return;
}
}
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 933c1a3..3395ae7 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -282,8 +282,8 @@
return buf;
}
-static int ExpandImmediate(int value) {
- int mode = (value & 0xf00) >> 8;
+static int32_t ExpandImmediate(int value) {
+ int32_t mode = (value & 0xf00) >> 8;
uint32_t bits = value & 0xff;
switch (mode) {
case 0:
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 00de8de..3ceeacf 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -22,14 +22,14 @@
/* This file contains codegen for the Thumb ISA. */
-static int EncodeImmSingle(int value) {
- int res;
- int bit_a = (value & 0x80000000) >> 31;
- int not_bit_b = (value & 0x40000000) >> 30;
- int bit_b = (value & 0x20000000) >> 29;
- int b_smear = (value & 0x3e000000) >> 25;
- int slice = (value & 0x01f80000) >> 19;
- int zeroes = (value & 0x0007ffff);
+static int32_t EncodeImmSingle(int32_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x80000000) >> 31;
+ int32_t not_bit_b = (value & 0x40000000) >> 30;
+ int32_t bit_b = (value & 0x20000000) >> 29;
+ int32_t b_smear = (value & 0x3e000000) >> 25;
+ int32_t slice = (value & 0x01f80000) >> 19;
+ int32_t zeroes = (value & 0x0007ffff);
if (zeroes != 0)
return -1;
if (bit_b) {
@@ -47,15 +47,15 @@
* Determine whether value can be encoded as a Thumb2 floating point
* immediate. If not, return -1. If so return encoded 8-bit value.
*/
-static int EncodeImmDouble(int64_t value) {
- int res;
- int bit_a = (value & 0x8000000000000000ll) >> 63;
- int not_bit_b = (value & 0x4000000000000000ll) >> 62;
- int bit_b = (value & 0x2000000000000000ll) >> 61;
- int b_smear = (value & 0x3fc0000000000000ll) >> 54;
- int slice = (value & 0x003f000000000000ll) >> 48;
+static int32_t EncodeImmDouble(int64_t value) {
+ int32_t res;
+ int32_t bit_a = (value & 0x8000000000000000ll) >> 63;
+ int32_t not_bit_b = (value & 0x4000000000000000ll) >> 62;
+ int32_t bit_b = (value & 0x2000000000000000ll) >> 61;
+ int32_t b_smear = (value & 0x3fc0000000000000ll) >> 54;
+ int32_t slice = (value & 0x003f000000000000ll) >> 48;
uint64_t zeroes = (value & 0x0000ffffffffffffll);
- if (zeroes != 0)
+ if (zeroes != 0ull)
return -1;
if (bit_b) {
if ((not_bit_b != 0) || (b_smear != 0xff))
@@ -96,8 +96,8 @@
static int LeadingZeros(uint32_t val) {
uint32_t alt;
- int n;
- int count;
+ int32_t n;
+ int32_t count;
count = 16;
n = 32;
@@ -117,8 +117,8 @@
* immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
*/
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
- int z_leading;
- int z_trailing;
+ int32_t z_leading;
+ int32_t z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -314,6 +314,22 @@
case kOpSub:
opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
break;
+ case kOpRev:
+ DCHECK_EQ(shift, 0);
+ if (!thumb_form) {
+ // Binary, but rm is encoded twice.
+ return NewLIR3(kThumb2RevRR, r_dest_src1, r_src2, r_src2);
+ }
+ opcode = kThumbRev;
+ break;
+ case kOpRevsh:
+ DCHECK_EQ(shift, 0);
+ if (!thumb_form) {
+ // Binary, but rm is encoded twice.
+ return NewLIR3(kThumb2RevshRR, r_dest_src1, r_src2, r_src2);
+ }
+ opcode = kThumbRevsh;
+ break;
case kOp2Byte:
DCHECK_EQ(shift, 0);
return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
@@ -421,12 +437,12 @@
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
LIR* res;
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
ArmOpcode alt_opcode = kThumbBkpt;
bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
- int mod_imm = ModifiedImmediate(value);
- int mod_imm_neg = ModifiedImmediate(-value);
+ int32_t mod_imm = ModifiedImmediate(value);
+ int32_t mod_imm_neg = ModifiedImmediate(-value);
switch (op) {
case kOpLsl:
@@ -544,7 +560,7 @@
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
+ int32_t abs_value = (neg) ? -value : value;
bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 617f357..a6653fa 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -160,7 +160,8 @@
break;
case kPseudoDalvikByteCodeBoundary:
if (lir->operands[0] == 0) {
- lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
+ // NOTE: only used for debug listings.
+ lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
}
LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
<< lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
@@ -369,6 +370,17 @@
buf.push_back((data >> 24) & 0xff);
}
+// Push 8 bytes on 64-bit systems; 4 on 32-bit systems.
+static void PushPointer(std::vector<uint8_t>&buf, void const* pointer) {
+ uintptr_t data = reinterpret_cast<uintptr_t>(pointer);
+ if (sizeof(void*) == sizeof(uint64_t)) {
+ PushWord(buf, (data >> (sizeof(void*) * 4)) & 0xFFFFFFFF);
+ PushWord(buf, data & 0xFFFFFFFF);
+ } else {
+ PushWord(buf, data);
+ }
+}
+
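
For reference, PushPointer above emits the pointer through PushWord (low byte first within each word); on a 64-bit host the high 32 bits are pushed before the low 32 bits. A self-contained illustration of the resulting byte order, with PushWord reconstructed from the tail shown above and a made-up pointer value:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  static void PushWord(std::vector<uint8_t>& buf, int data) {
    buf.push_back(data & 0xff);
    buf.push_back((data >> 8) & 0xff);
    buf.push_back((data >> 16) & 0xff);
    buf.push_back((data >> 24) & 0xff);
  }

  int main() {
    std::vector<uint8_t> buf;
    uint64_t data = 0x0000000112345678ULL;      // Hypothetical pointer value.
    PushWord(buf, (data >> 32) & 0xFFFFFFFF);   // High word, as in PushPointer.
    PushWord(buf, data & 0xFFFFFFFF);           // Then the low word.
    assert(buf.size() == 8);
    assert(buf[0] == 0x01 && buf[4] == 0x78);   // Each word is little-endian.
    return 0;
  }
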
static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
while (buf.size() < offset) {
buf.push_back(0);
@@ -395,9 +407,8 @@
static_cast<InvokeType>(data_lir->operands[1]),
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(code_buffer_, unique_patch_value);
+ // unique value based on target to ensure code deduplication works
+ PushPointer(code_buffer_, &id);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
@@ -411,9 +422,8 @@
static_cast<InvokeType>(data_lir->operands[1]),
code_buffer_.size());
const DexFile::MethodId& id = cu_->dex_file->GetMethodId(target);
- // unique based on target to ensure code deduplication works
- uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
- PushWord(code_buffer_, unique_patch_value);
+ // unique value based on target to ensure code deduplication works
+ PushPointer(code_buffer_, &id);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -449,7 +459,7 @@
LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
}
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
- const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
for (int elems = 0; elems < tab_rec->table[1]; elems++) {
int disp = tab_rec->targets[elems]->offset - bx_offset;
if (cu_->verbose) {
@@ -490,7 +500,7 @@
}
}
-static int AssignLiteralOffsetCommon(LIR* lir, int offset) {
+static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
for (; lir != NULL; lir = lir->next) {
lir->offset = offset;
offset += 4;
@@ -498,6 +508,17 @@
return offset;
}
+static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset) {
+ unsigned int element_size = sizeof(void*);
+ // Align to natural pointer size.
+ offset = (offset + (element_size - 1)) & ~(element_size - 1);
+ for (; lir != NULL; lir = lir->next) {
+ lir->offset = offset;
+ offset += element_size;
+ }
+ return offset;
+}
+
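
The round-up at the top of AssignLiteralPointerOffsetCommon is the standard power-of-two alignment idiom; with 8-byte pointers, for example, an offset of 13 becomes 16 (worked numbers only):

  #include <cassert>

  int main() {
    unsigned int element_size = 8;   // sizeof(void*) on a 64-bit host.
    unsigned int offset = 13;
    offset = (offset + (element_size - 1)) & ~(element_size - 1);
    assert(offset == 16);
    return 0;
  }
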
// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
bool success = true;
@@ -607,8 +628,8 @@
table_index = (table_index + 1) % entries_;
}
in_use_[table_index] = true;
- SetNativeOffset(table_index, native_offset);
- DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+ SetCodeOffset(table_index, native_offset);
+ DCHECK_EQ(native_offset, GetCodeOffset(table_index));
SetReferences(table_index, references);
}
@@ -617,7 +638,7 @@
return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
}
- uint32_t GetNativeOffset(size_t table_index) {
+ uint32_t GetCodeOffset(size_t table_index) {
uint32_t native_offset = 0;
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
@@ -626,7 +647,7 @@
return native_offset;
}
- void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+ void SetCodeOffset(size_t table_index, uint32_t native_offset) {
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
(*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
@@ -681,17 +702,17 @@
}
/* Determine the offset of each literal field */
-int Mir2Lir::AssignLiteralOffset(int offset) {
+int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
offset = AssignLiteralOffsetCommon(literal_list_, offset);
- offset = AssignLiteralOffsetCommon(code_literal_list_, offset);
- offset = AssignLiteralOffsetCommon(method_literal_list_, offset);
+ offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset);
+ offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset);
return offset;
}
-int Mir2Lir::AssignSwitchTablesOffset(int offset) {
+int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
while (true) {
- Mir2Lir::SwitchTable *tab_rec = iterator.Next();
+ Mir2Lir::SwitchTable* tab_rec = iterator.Next();
if (tab_rec == NULL) break;
tab_rec->offset = offset;
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -705,7 +726,7 @@
return offset;
}
-int Mir2Lir::AssignFillArrayDataOffset(int offset) {
+int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
while (true) {
Mir2Lir::FillArrayData *tab_rec = iterator.Next();
@@ -725,7 +746,7 @@
* branch table during the assembly phase. All resource flags
* are set to prevent code motion. KeyVal is just there for debugging.
*/
-LIR* Mir2Lir::InsertCaseLabel(int vaddr, int keyVal) {
+LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
LIR* res = boundary_lir;
if (cu_->verbose) {
@@ -743,10 +764,10 @@
return res;
}
-void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
const uint16_t* table = tab_rec->table;
- int base_vaddr = tab_rec->vaddr;
- const int *targets = reinterpret_cast<const int*>(&table[4]);
+ DexOffset base_vaddr = tab_rec->vaddr;
+ const int32_t *targets = reinterpret_cast<const int32_t*>(&table[4]);
int entries = table[1];
int low_key = s4FromSwitchData(&table[2]);
for (int i = 0; i < entries; i++) {
@@ -754,12 +775,12 @@
}
}
-void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec) {
+void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
const uint16_t* table = tab_rec->table;
- int base_vaddr = tab_rec->vaddr;
+ DexOffset base_vaddr = tab_rec->vaddr;
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
for (int i = 0; i < entries; i++) {
tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
}
@@ -792,8 +813,8 @@
*/
uint16_t ident = table[0];
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
<< ", entries: " << std::dec << entries;
for (int i = 0; i < entries; i++) {
@@ -812,7 +833,7 @@
* Total size is (4+size*2) 16-bit code units.
*/
uint16_t ident = table[0];
- const int* targets = reinterpret_cast<const int*>(&table[4]);
+ const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
int entries = table[1];
int low_key = s4FromSwitchData(&table[2]);
LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
@@ -824,8 +845,9 @@
}
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(int offset, const char* inst_str) {
- NewLIR1(kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ // NOTE: only used for debug listings.
+ NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
@@ -883,6 +905,7 @@
intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
tempreg_info_(arena, 20, kGrowableArrayMisc),
reginfo_map_(arena, 64, kGrowableArrayMisc),
+ pointer_storage_(arena, 128, kGrowableArrayMisc),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
@@ -900,9 +923,13 @@
promotion_map_ = static_cast<PromotionMap*>
(arena_->Alloc((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
+ // Reserve pointer id 0 for NULL.
+ size_t null_idx = WrapPointer(NULL);
+ DCHECK_EQ(null_idx, 0U);
}
void Mir2Lir::Materialize() {
+ cu_->NewTimingSplit("RegisterAllocation");
CompilerInitializeRegAlloc(); // Needs to happen after SSA naming
/* Allocate Registers using simple local allocation scheme */
@@ -914,6 +941,7 @@
* special codegen doesn't succeed, first_lir_insn_ will
* set to NULL;
*/
+ cu_->NewTimingSplit("SpecialMIR2LIR");
SpecialMIR2LIR(mir_graph_->GetSpecialCase());
}
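
The WrapPointer/UnwrapPointer calls threaded through this change replace raw pointers stored in 32-bit LIR operands with indices into the new pointer_storage_ array; the constructor above reserves index 0 so a wrapped NULL is always 0. The helpers themselves are not shown in the diff; a minimal stand-in with assumed names:

  #include <cstddef>
  #include <vector>

  // Simplified; the real table is an arena-backed GrowableArray on Mir2Lir.
  class PointerTable {
   public:
    PointerTable() { Wrap(nullptr); }   // Reserve index 0 for NULL.

    size_t Wrap(const void* pointer) {
      storage_.push_back(pointer);
      return storage_.size() - 1;       // Small index fits a 32-bit operand.
    }

    const void* Unwrap(size_t index) const { return storage_[index]; }

   private:
    std::vector<const void*> storage_;
  };
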
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2670c23..2b3404a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -40,7 +40,7 @@
barrier->u.m.def_mask = ENCODE_ALL;
}
-// FIXME: need to do some work to split out targets with
+// TODO: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind) {
DCHECK_NE(cu_->instruction_set, kMips);
@@ -503,7 +503,7 @@
ResetRegPool();
ResetDefTracking();
LIR* lab = suspend_launchpads_.Get(i);
- LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+ LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
int r_tgt = CallHelperSetup(helper_offset);
@@ -518,12 +518,12 @@
ResetRegPool();
ResetDefTracking();
LIR* lab = intrinsic_launchpads_.Get(i);
- CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
+ CallInfo* info = reinterpret_cast<CallInfo*>(UnwrapPointer(lab->operands[0]));
current_dalvik_offset_ = info->offset;
AppendLIR(lab);
// NOTE: GenInvoke handles MarkSafepointPC
GenInvoke(info);
- LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+ LIR* resume_lab = reinterpret_cast<LIR*>(UnwrapPointer(lab->operands[2]));
if (resume_lab != NULL) {
OpUnconditionalBranch(resume_lab);
}
@@ -1351,7 +1351,7 @@
}
// Returns the index of the lowest set bit in 'x'.
-static int LowestSetBit(unsigned int x) {
+static int32_t LowestSetBit(uint32_t x) {
int bit_posn = 0;
while ((x & 0xf) == 0) {
bit_posn += 4;
@@ -1752,8 +1752,8 @@
FlushAllRegs();
LIR* branch = OpTestSuspend(NULL);
LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
- LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
+ LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(ret_lab),
+ current_dalvik_offset_);
branch->target = target;
suspend_launchpads_.Insert(target);
}
@@ -1766,8 +1766,8 @@
}
OpTestSuspend(target);
LIR* launch_pad =
- RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
- reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
+ RawLIR(current_dalvik_offset_, kPseudoSuspendTarget, WrapPointer(target),
+ current_dalvik_offset_);
FlushAllRegs();
OpUnconditionalBranch(launch_pad);
suspend_launchpads_.Insert(launch_pad);
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index f0623b0..62feade 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -350,16 +350,13 @@
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (cu->instruction_set != kThumb2) {
- // Disable sharpening
- direct_code = 0;
- direct_method = 0;
- }
if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<unsigned int>(-1)) {
- cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+ if (cu->instruction_set != kX86) {
+ cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
+ }
} else {
CHECK_EQ(cu->dex_file, target_method.dex_file);
LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
@@ -405,6 +402,7 @@
cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
} else {
CHECK_EQ(cu->dex_file, target_method.dex_file);
+ CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
target_method.dex_method_index, 0);
if (data_target == NULL) {
@@ -501,10 +499,6 @@
uint32_t unused, uintptr_t unused2,
uintptr_t direct_method, InvokeType unused4) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (cu->instruction_set != kThumb2) {
- // Disable sharpening
- direct_method = 0;
- }
ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
if (direct_method != 0) {
@@ -908,7 +902,7 @@
LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation */
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
@@ -919,7 +913,7 @@
reg_max = AllocTemp();
LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
// Set up a launch pad to allow retry in case of bounds violation */
- launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
@@ -1085,7 +1079,7 @@
}
int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
// NOTE: not a safepoint
@@ -1095,7 +1089,7 @@
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
}
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
- launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
+ launch_pad->operands[2] = WrapPointer(resume_tgt);
// Record that we've already inlined & null checked
info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
RegLocation rl_return = GetReturn(false);
@@ -1123,7 +1117,7 @@
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
// TUNING: check if rl_cmp.s_reg_low is already null checked
- LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
// NOTE: not a safepoint
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 6bfccfd..5f5e5e4 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -489,12 +489,12 @@
LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC);
InsertLIRBefore(lir, curr_pc);
LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0,
- reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ LIR* delta_hi = RawLIR(dalvik_offset, kMipsDeltaHi, r_AT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
InsertLIRBefore(lir, delta_hi);
InsertLIRBefore(lir, anchor);
- LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0,
- reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ LIR* delta_lo = RawLIR(dalvik_offset, kMipsDeltaLo, r_AT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
InsertLIRBefore(lir, delta_lo);
LIR* addu = RawLIR(dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
InsertLIRBefore(lir, addu);
@@ -512,7 +512,7 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
@@ -538,8 +538,8 @@
* and is found in lir->target. If operands[3] is non-NULL,
* then it is a Switch/Data table.
*/
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
@@ -565,21 +565,21 @@
res = kRetryAll;
}
} else if (lir->opcode == kMipsDeltaLo) {
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = delta & 0xffff;
} else if (lir->opcode == kMipsDeltaHi) {
- int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
lir->operands[1] = (delta >> 16) & 0xffff;
} else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -592,8 +592,8 @@
}
} else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -606,8 +606,8 @@
}
} else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
LIR *target_lir = lir->target;
- uintptr_t pc = lir->offset + 4;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -619,8 +619,8 @@
lir->operands[2] = delta >> 2;
}
} else if (lir->opcode == kMipsJal) {
- uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
- uintptr_t target = lir->operands[0];
+ CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[0];
/* ensure PC-region branch can be used */
DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
if (target & 0x3) {
@@ -629,11 +629,11 @@
lir->operands[0] = target >> 2;
} else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
LIR *target_lir = lir->target;
- uintptr_t target = start_addr + target_lir->offset;
+ CodeOffset target = start_addr + target_lir->offset;
lir->operands[1] = target >> 16;
} else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
LIR *target_lir = lir->target;
- uintptr_t target = start_addr + target_lir->offset;
+ CodeOffset target = start_addr + target_lir->offset;
lir->operands[2] = lir->operands[2] + target;
}
}
@@ -768,6 +768,7 @@
* TODO: consolidate w/ Arm assembly mechanism.
*/
void MipsMir2Lir::AssembleLIR() {
+ cu_->NewTimingSplit("Assemble");
AssignOffsets();
int assembler_retries = 0;
/*
@@ -792,6 +793,7 @@
}
// Install literals
+ cu_->NewTimingSplit("LiteralData");
InstallLiteralPools();
// Install switch tables
@@ -801,8 +803,10 @@
InstallFillArrayData();
// Create the mapping table and native offset to reference map.
+ cu_->NewTimingSplit("PcMappingTable");
CreateMappingTables();
+ cu_->NewTimingSplit("GcMap");
CreateNativeGcMap();
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 9a5ca2c..18c8cf8 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -59,14 +59,14 @@
* done:
*
*/
-void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
+ SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
@@ -101,8 +101,7 @@
// Remember base label so offsets can be computed later
tab_rec->anchor = base_label;
int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);
// Grab switch test value
@@ -138,20 +137,20 @@
* jr r_RA
* done:
*/
-void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
+ SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ ArenaAllocator::kAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -196,8 +195,7 @@
// Materialize the table base pointer
int rBase = AllocTemp();
- NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rBase, 0, WrapPointer(base_label), WrapPointer(tab_rec));
// Load the displacement from the switch table
int r_disp = AllocTemp();
@@ -222,10 +220,10 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tab_rec =
+ FillArrayData* tab_rec =
reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
ArenaAllocator::kAllocData));
tab_rec->table = table;
@@ -252,8 +250,7 @@
LIR* base_label = NewLIR0(kPseudoTargetLabel);
// Materialize a pointer to the fill data image
- NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR4(kMipsDelta, rMIPS_ARG1, 0, WrapPointer(base_label), WrapPointer(tab_rec));
// And go...
ClobberCalleeSave();
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 387fef3..0be20e8 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -74,7 +74,7 @@
void AssembleLIR();
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
const char* GetTargetInstFmt(int opcode);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 5d9ae33..2ba2c84 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -93,7 +93,7 @@
} else if ((value < 0) && (value >= -32768)) {
res = NewLIR3(kMipsAddiu, r_dest, r_ZERO, value);
} else {
- res = NewLIR2(kMipsLui, r_dest, value>>16);
+ res = NewLIR2(kMipsLui, r_dest, value >> 16);
if (value & 0xffff)
NewLIR3(kMipsOri, r_dest, r_dest, value);
}
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index f293700..1a30b7a 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -43,7 +43,7 @@
}
}
-inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
+inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
int op1, int op2, int op3, int op4, LIR* target) {
LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
insn->dalvik_offset = dalvik_offset;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2b26c3d..fa9a3ad 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -241,9 +241,9 @@
case Instruction::GOTO_16:
case Instruction::GOTO_32:
if (mir_graph_->IsBackedge(bb, bb->taken)) {
- GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken->id]);
+ GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
} else {
- OpUnconditionalBranch(&label_list[bb->taken->id]);
+ OpUnconditionalBranch(&label_list[bb->taken]);
}
break;
@@ -272,23 +272,22 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- LIR* taken = &label_list[bb->taken->id];
- LIR* fall_through = &label_list[bb->fall_through->id];
+ LIR* taken = &label_list[bb->taken];
+ LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
mir_graph_->ConstantValue(rl_src[1].orig_sreg));
- BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target)) {
+ BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+ if (mir_graph_->IsBackedge(bb, target_id)) {
GenSuspendTest(opt_flags);
}
- OpUnconditionalBranch(&label_list[target->id]);
+ OpUnconditionalBranch(&label_list[target_id]);
} else {
if (mir_graph_->IsBackwardsBranch(bb)) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken,
- fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
}
break;
}
@@ -299,16 +298,16 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- LIR* taken = &label_list[bb->taken->id];
- LIR* fall_through = &label_list[bb->fall_through->id];
+ LIR* taken = &label_list[bb->taken];
+ LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
- BasicBlock* target = is_taken ? bb->taken : bb->fall_through;
- if (mir_graph_->IsBackedge(bb, target)) {
+ BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
+ if (mir_graph_->IsBackedge(bb, target_id)) {
GenSuspendTest(opt_flags);
}
- OpUnconditionalBranch(&label_list[target->id]);
+ OpUnconditionalBranch(&label_list[target_id]);
} else {
if (mir_graph_->IsBackwardsBranch(bb)) {
GenSuspendTest(opt_flags);
@@ -820,6 +819,8 @@
}
void Mir2Lir::MethodMIR2LIR() {
+ cu_->NewTimingSplit("MIR2LIR");
+
// Hold the labels of each block.
block_label_list_ =
static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
@@ -831,15 +832,16 @@
while (curr_bb != NULL) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
- if ((curr_bb->fall_through != NULL) && (curr_bb->fall_through != next_bb)) {
- OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through->id]);
+ BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
+ if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
} while ((next_bb != NULL) && (next_bb->block_type == kDead));
}
-
+ cu_->NewTimingSplit("Launchpads");
HandleSuspendLaunchPads();
HandleThrowLaunchPads();
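The mir_to_lir.cc changes above stop chasing successor pointers (bb->taken->id) and instead treat bb->taken and bb->fall_through as BasicBlockId values that index block_label_list_ directly. A stripped-down sketch of that indexing scheme, using illustrative types rather than the real MIR classes:

#include <cassert>
#include <cstdint>
#include <vector>

typedef uint16_t BasicBlockId;
static const BasicBlockId NullBasicBlockId = 0;  // assumption: id 0 stands for "no block"

struct Label { int offset; };

struct Block {
  BasicBlockId id;
  BasicBlockId taken;          // successor ids, not pointers
  BasicBlockId fall_through;
};

int main() {
  // One label per block, indexed by block id (stand-in for block_label_list_).
  std::vector<Label> labels(4, Label{-1});
  Block bb;
  bb.id = 1;
  bb.taken = 3;
  bb.fall_through = NullBasicBlockId;
  // Resolving a branch target is a direct array index, no pointer dereference.
  Label* target = &labels[bb.taken];
  assert(target == &labels[3]);
  return 0;
}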
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 5df2672..d629b44 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -30,6 +30,14 @@
namespace art {
+/*
+ * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
+ * add type safety (see runtime/offsets.h).
+ */
+typedef uint32_t DexOffset; // Dex offset in code units.
+typedef uint16_t NarrowDexOffset; // For use in structs, Dex offsets range from 0 .. 0xffff.
+typedef uint32_t CodeOffset; // Native code offset in bytes.
+
// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0
@@ -119,8 +127,8 @@
};
struct LIR {
- int offset; // Offset of this instruction.
- uint16_t dalvik_offset; // Offset of Dalvik opcode in code units (16-bit words).
+ CodeOffset offset; // Offset of this instruction.
+ NarrowDexOffset dalvik_offset; // Offset of Dalvik opcode in code units (16-bit words).
int16_t opcode;
LIR* next;
LIR* prev;
@@ -134,10 +142,10 @@
unsigned int fixup:8; // Fixup kind.
} flags;
union {
- UseDefMasks m; // Use & Def masks used during optimization.
- AssemblyInfo a; // Instruction encoding used during assembly phase.
+ UseDefMasks m; // Use & Def masks used during optimization.
+ AssemblyInfo a; // Instruction encoding used during assembly phase.
} u;
- int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2].
+ int32_t operands[5]; // [0..4] = [dest, src1, src2, extra, extra2].
};
// Target-specific initialization.
@@ -184,19 +192,23 @@
class Mir2Lir : public Backend {
public:
- struct SwitchTable {
- int offset;
- const uint16_t* table; // Original dex table.
- int vaddr; // Dalvik offset of switch opcode.
- LIR* anchor; // Reference instruction for relative offsets.
- LIR** targets; // Array of case targets.
+ /*
+ * Auxiliary information describing the location of data embedded in the Dalvik
+ * byte code stream.
+ */
+ struct EmbeddedData {
+ CodeOffset offset; // Code offset of data block.
+ const uint16_t* table; // Original dex data.
+ DexOffset vaddr; // Dalvik offset of parent opcode.
};
- struct FillArrayData {
- int offset;
- const uint16_t* table; // Original dex table.
- int size;
- int vaddr; // Dalvik offset of FILL_ARRAY_DATA opcode.
+ struct FillArrayData : EmbeddedData {
+ int32_t size;
+ };
+
+ struct SwitchTable : EmbeddedData {
+ LIR* anchor; // Reference instruction for relative offsets.
+ LIR** targets; // Array of case targets.
};
/* Static register use counts */
@@ -260,6 +272,34 @@
return (opcode < 0);
}
+ /*
+ * LIR operands are 32-bit integers. Sometimes (especially for managing
+ * instructions which require PC-relative fixups), we need the operands to carry
+ * pointers. To do this, we assign these pointers an index in pointer_storage_, and
+ * hold that index in the operand array.
+ * TUNING: If use of these utilities becomes more common on 32-bit builds, it
+ * may be worth conditionally-compiling a set of identity functions here.
+ */
+ uint32_t WrapPointer(void* pointer) {
+ uint32_t res = pointer_storage_.Size();
+ pointer_storage_.Insert(pointer);
+ return res;
+ }
+
+ void* UnwrapPointer(size_t index) {
+ return pointer_storage_.Get(index);
+ }
+
+ // strdup(), but allocates from the arena.
+ char* ArenaStrdup(const char* str) {
+ size_t len = strlen(str) + 1;
+ char* res = reinterpret_cast<char*>(arena_->Alloc(len, ArenaAllocator::kAllocMisc));
+ if (res != NULL) {
+ strncpy(res, str, len);
+ }
+ return res;
+ }
+
// Shared by all targets - implemented in codegen_util.cc
void AppendLIR(LIR* lir);
void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
@@ -277,7 +317,7 @@
void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
void DumpPromotionMap();
void CodegenDump();
- LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+ LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
@@ -292,7 +332,7 @@
void ProcessSwitchTables();
void DumpSparseSwitchTable(const uint16_t* table);
void DumpPackedSwitchTable(const uint16_t* table);
- void MarkBoundary(int offset, const char* inst_str);
+ void MarkBoundary(DexOffset offset, const char* inst_str);
void NopLIR(LIR* lir);
void UnlinkLIR(LIR* lir);
bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
@@ -307,12 +347,12 @@
bool VerifyCatchEntries();
void CreateMappingTables();
void CreateNativeGcMap();
- int AssignLiteralOffset(int offset);
- int AssignSwitchTablesOffset(int offset);
- int AssignFillArrayDataOffset(int offset);
- LIR* InsertCaseLabel(int vaddr, int keyVal);
- void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
- void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);
+ int AssignLiteralOffset(CodeOffset offset);
+ int AssignSwitchTablesOffset(CodeOffset offset);
+ int AssignFillArrayDataOffset(CodeOffset offset);
+ LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
+ void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
+ void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
// Shared by all targets - implemented in local_optimizations.cc
void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
@@ -642,7 +682,7 @@
virtual void GenEntrySequence(RegLocation* ArgLocs,
RegLocation rl_method) = 0;
virtual void GenExitSequence() = 0;
- virtual void GenFillArrayData(uint32_t table_offset,
+ virtual void GenFillArrayData(DexOffset table_offset,
RegLocation rl_src) = 0;
virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) = 0;
@@ -655,9 +695,9 @@
int second_bit) = 0;
virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
- virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
+ virtual void GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) = 0;
- virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
+ virtual void GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) = 0;
virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
SpecialCaseHandler special_case) = 0;
@@ -672,13 +712,10 @@
// Required for target - single operation generators.
virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
- virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
- LIR* target) = 0;
- virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
- LIR* target) = 0;
+ virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) = 0;
+ virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target) = 0;
virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
- virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
- LIR* target) = 0;
+ virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) = 0;
virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
@@ -690,16 +727,13 @@
virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
- virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
- int r_src2) = 0;
+ virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0;
virtual LIR* OpTestSuspend(LIR* target) = 0;
virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
virtual LIR* OpVldm(int rBase, int count) = 0;
virtual LIR* OpVstm(int rBase, int count) = 0;
- virtual void OpLea(int rBase, int reg1, int reg2, int scale,
- int offset) = 0;
- virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
- int src_hi) = 0;
+ virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0;
+ virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0;
virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
virtual bool InexpensiveConstantInt(int32_t value) = 0;
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
@@ -752,6 +786,7 @@
GrowableArray<LIR*> intrinsic_launchpads_;
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
+ GrowableArray<void*> pointer_storage_;
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
* Native PC is on the return address of the safepointed operation. Dex PC is for
@@ -763,9 +798,9 @@
* immediately preceed the instruction.
*/
std::vector<uint32_t> dex2pc_mapping_table_;
- int current_code_offset_; // Working byte offset of machine instructons.
- int data_offset_; // starting offset of literal pool.
- int total_size_; // header + code size.
+ CodeOffset current_code_offset_; // Working byte offset of machine instructions.
+ CodeOffset data_offset_; // starting offset of literal pool.
+ size_t total_size_; // header + code size.
LIR* block_label_list_;
PromotionMap* promotion_map_;
/*
@@ -777,8 +812,8 @@
* in the CompilationUnit struct before codegen for each instruction.
* The low-level LIR creation utilites will pull it from here. Rework this.
*/
- int current_dalvik_offset_;
- int estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
+ DexOffset current_dalvik_offset_;
+ size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
RegisterPool* reg_pool_;
/*
* Sanity checking for the register temp tracking. The same ssa
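Besides the DexOffset/CodeOffset typedefs, the key addition in this header is the WrapPointer()/UnwrapPointer() pair: LIR operands stay int32_t, and any pointer an operand needs to carry is parked in pointer_storage_ and referred to by index. A self-contained sketch of the same pattern with std::vector standing in for the arena-backed GrowableArray (class and member names are illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-in for the pointer_storage_ mechanism: because operands
// are 32-bit, pointers live in a side table and operands hold only the index.
class PointerTable {
 public:
  uint32_t WrapPointer(void* pointer) {
    uint32_t index = static_cast<uint32_t>(storage_.size());
    storage_.push_back(pointer);
    return index;  // small enough to fit in a 32-bit LIR operand
  }

  void* UnwrapPointer(size_t index) const {
    assert(index < storage_.size());
    return storage_[index];
  }

 private:
  std::vector<void*> storage_;
};

int main() {
  PointerTable table;
  int dummy = 42;
  uint32_t operand = table.WrapPointer(&dummy);                      // store the index as the operand
  int* recovered = static_cast<int*>(table.UnwrapPointer(operand));  // later, during assembly
  assert(recovered == &dummy && *recovered == 42);
  return 0;
}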
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 7927ff9..41a57af 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -66,10 +66,9 @@
LOG(INFO) << "================================================";
for (int i = 0; i < num_regs; i++) {
LOG(INFO) << StringPrintf(
- "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+ "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d",
p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
- p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
- reinterpret_cast<uintptr_t>(p[i].def_end));
+ p[i].live, p[i].dirty, p[i].s_reg);
}
LOG(INFO) << "================================================";
}
@@ -769,9 +768,9 @@
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
- int new_regs;
- int low_reg;
- int high_reg;
+ int32_t new_regs;
+ int32_t low_reg;
+ int32_t high_reg;
loc = UpdateLocWide(loc);
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 064ff31..2047f30 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -246,6 +246,8 @@
UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
#undef UNARY_ENCODING_MAP
+ { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0, { 0, 0, 0x0F, 0xC8, 0, 0, 0, 0 }, "Bswap32R", "!0r" },
+
#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
@@ -371,6 +373,8 @@
return lir->operands[0]; // length of nop is sole operand
case kNullary:
return 1; // 1 byte of opcode
+ case kRegOpcode: // lir operands - 0: reg
+ return ComputeSize(entry, 0, 0, false) - 1; // subtract 1 for modrm
case kReg: // lir operands - 0: reg
return ComputeSize(entry, 0, 0, false);
case kMem: // lir operands - 0: base, 1: disp
@@ -514,6 +518,33 @@
}
}
+void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) {
+ if (entry->skeleton.prefix1 != 0) {
+ code_buffer_.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ code_buffer_.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ code_buffer_.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ code_buffer_.push_back(entry->skeleton.extra_opcode1);
+ // There's no 3-byte instruction with +rd
+ DCHECK_NE(0x38, entry->skeleton.extra_opcode1);
+ DCHECK_NE(0x3A, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ DCHECK(!X86_FPREG(reg));
+ DCHECK_LT(reg, 8);
+ code_buffer_.back() += reg;
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
if (entry->skeleton.prefix1 != 0) {
code_buffer_.push_back(entry->skeleton.prefix1);
@@ -526,7 +557,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -583,7 +614,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -596,7 +627,9 @@
reg = reg & X86_FP_REG_MASK;
}
if (reg >= 4) {
- DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ DCHECK(strchr(entry->name, '8') == NULL ||
+ entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
+ << entry->name << " " << static_cast<int>(reg)
<< " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
DCHECK_LT(reg, 8);
@@ -632,7 +665,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -673,7 +706,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -713,7 +746,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -750,7 +783,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -809,7 +842,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -859,7 +892,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -924,7 +957,7 @@
}
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -1038,7 +1071,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -1067,7 +1100,7 @@
code_buffer_.push_back(entry->skeleton.opcode);
if (entry->skeleton.opcode == 0x0F) {
code_buffer_.push_back(entry->skeleton.extra_opcode1);
- if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
code_buffer_.push_back(entry->skeleton.extra_opcode2);
} else {
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
@@ -1090,11 +1123,13 @@
int base_or_table, uint8_t index, int scale, int table_or_disp) {
int disp;
if (entry->opcode == kX86PcRelLoadRA) {
- Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
+ Mir2Lir::EmbeddedData *tab_rec =
+ reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(table_or_disp));
disp = tab_rec->offset;
} else {
DCHECK(entry->opcode == kX86PcRelAdr);
- Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
+ Mir2Lir::EmbeddedData *tab_rec =
+ reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
disp = tab_rec->offset;
}
if (entry->skeleton.prefix1 != 0) {
@@ -1161,7 +1196,7 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
+AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
@@ -1181,13 +1216,13 @@
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
int delta = 0;
- uintptr_t pc;
+ CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
pc = lir->offset + 2 /* opcode + rel8 */;
} else {
pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
}
- uintptr_t target = target_lir->offset;
+ CodeOffset target = target_lir->offset;
delta = target - pc;
if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
if (kVerbosePcFixup) {
@@ -1211,8 +1246,8 @@
case kX86Jcc32: {
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
- uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
if (kVerbosePcFixup) {
LOG(INFO) << "Source:";
@@ -1228,13 +1263,13 @@
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
int delta = 0;
- uintptr_t pc;
+ CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
pc = lir->offset + 2 /* opcode + rel8 */;
} else {
pc = lir->offset + 5 /* opcode + rel32 */;
}
- uintptr_t target = target_lir->offset;
+ CodeOffset target = target_lir->offset;
delta = target - pc;
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
// Useless branch
@@ -1257,8 +1292,8 @@
case kX86Jmp32: {
LIR *target_lir = lir->target;
DCHECK(target_lir != NULL);
- uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
- uintptr_t target = target_lir->offset;
+ CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
+ CodeOffset target = target_lir->offset;
int delta = target - pc;
lir->operands[0] = delta;
break;
@@ -1299,6 +1334,9 @@
DCHECK_EQ(0, entry->skeleton.ax_opcode);
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
break;
+ case kRegOpcode: // lir operands - 0: reg
+ EmitOpRegOpcode(entry, lir->operands[0]);
+ break;
case kReg: // lir operands - 0: reg
EmitOpReg(entry, lir->operands[0]);
break;
@@ -1441,6 +1479,7 @@
* TODO: consolidate w/ Arm assembly mechanism.
*/
void X86Mir2Lir::AssembleLIR() {
+ cu_->NewTimingSplit("Assemble");
AssignOffsets();
int assembler_retries = 0;
/*
@@ -1464,6 +1503,7 @@
}
}
+ cu_->NewTimingSplit("LiteralData");
// Install literals
InstallLiteralPools();
@@ -1474,8 +1514,10 @@
InstallFillArrayData();
// Create the mapping table and native offset to reference map.
+ cu_->NewTimingSplit("PcMappingTable");
CreateMappingTables();
+ cu_->NewTimingSplit("GcMap");
CreateNativeGcMap();
}
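The new kRegOpcode kind added above covers x86 forms that encode the register in the low three bits of the final opcode byte ("opcode+rd") instead of using a ModRM byte, which is why EmitOpRegOpcode adds reg to code_buffer_.back() and ComputeSize knocks one byte off. A standalone sketch of that encoding for BSWAP r32 (0F C8+rd), with hypothetical names:

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative encoder for an "opcode+rd" instruction such as BSWAP r32.
// No ModRM byte is emitted; the register number is folded into the opcode.
static void EmitBswap32(std::vector<uint8_t>* code, uint8_t reg) {
  assert(reg < 8);               // 32-bit x86: GP registers 0..7
  code->push_back(0x0F);         // two-byte opcode escape
  code->push_back(0xC8 + reg);   // C8+rd selects the register
}

int main() {
  std::vector<uint8_t> code;
  EmitBswap32(&code, 1);         // BSWAP ecx
  assert(code.size() == 2 && code[0] == 0x0F && code[1] == 0xC9);
  return 0;
}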
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 7fad6f0..17924b0 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -31,15 +31,15 @@
* The sparse table in the literal pool is an array of <key,displacement>
* pairs.
*/
-void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpSparseSwitchTable(table);
}
int entries = table[1];
- const int* keys = reinterpret_cast<const int*>(&table[2]);
- const int* targets = &keys[entries];
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
+ const int32_t* targets = &keys[entries];
rl_src = LoadValue(rl_src, kCoreReg);
for (int i = 0; i < entries; i++) {
int key = keys[i];
@@ -66,15 +66,15 @@
* jmp r_start_of_method
* done:
*/
-void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
if (cu_->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tab_rec =
- static_cast<SwitchTable *>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ SwitchTable* tab_rec =
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
@@ -103,8 +103,7 @@
// Load the displacement from the switch table
int disp_reg = AllocTemp();
- NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
- reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2, WrapPointer(tab_rec));
// Add displacement to start of method
OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
// ..and go!
@@ -126,10 +125,10 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
+void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tab_rec =
+ FillArrayData* tab_rec =
static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
@@ -144,7 +143,7 @@
LoadValueDirectFixed(rl_src, rX86_ARG0);
// Materialize a pointer to the fill data image
NewLIR1(kX86StartOfMethod, rX86_ARG2);
- NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
rX86_ARG1, true);
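GenSparseSwitch above walks a Dex sparse-switch payload: a 16-bit ident (0x0200) and entry count followed by parallel arrays of 32-bit keys and 32-bit branch targets, the targets being code-unit offsets relative to the switch opcode. A small sketch of reading such a payload (little-endian host assumed; the helper name is illustrative):

#include <cstdint>
#include <cstdio>

// Illustrative reader for a sparse-switch-payload laid out as 16-bit code units:
//   ushort ident, ushort size, int keys[size], int targets[size].
static void DumpSparseSwitch(const uint16_t* table) {
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    std::printf("case %d -> +%d code units\n", keys[i], targets[i]);
  }
}

int main() {
  // Hand-built payload: 2 cases, keys {10, 20}, targets {+8, +16}.
  // Each int32 spans two code units, low half first on a little-endian host.
  alignas(4) uint16_t payload[] = {0x0200, 2, 10, 0, 20, 0, 8, 0, 16, 0};
  DumpSparseSwitch(payload);
  return 0;
}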
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index c266e39..b28d7ef 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -74,7 +74,7 @@
void AssembleLIR();
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
void SetupTargetResourceMasks(LIR* lir, uint64_t flags);
const char* GetTargetInstFmt(int opcode);
@@ -119,7 +119,7 @@
void GenDivZeroCheck(int reg_lo, int reg_hi);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+ void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
@@ -129,8 +129,8 @@
int lit, int first_bit, int second_bit);
void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
// Single operation generators.
@@ -170,6 +170,7 @@
private:
void EmitDisp(int base, int disp);
+ void EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg);
void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f736b5e..c9d6bfc 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -284,8 +284,8 @@
void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
bool is_double) {
- LIR* taken = &block_label_list_[bb->taken->id];
- LIR* not_taken = &block_label_list_[bb->fall_through->id];
+ LIR* taken = &block_label_list_[bb->taken];
+ LIR* not_taken = &block_label_list_[bb->fall_through];
LIR* branch = NULL;
RegLocation rl_src1;
RegLocation rl_src2;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index a9f2c59..3fbc763 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -166,7 +166,7 @@
}
void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- LIR* taken = &block_label_list_[bb->taken->id];
+ LIR* taken = &block_label_list_[bb->taken];
RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
FlushAllRegs();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0f005da..901ac9e 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -223,7 +223,7 @@
buf += StringPrintf("%d", operand);
break;
case 'p': {
- SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
buf += StringPrintf("0x%08x", tab_rec->offset);
break;
}
@@ -238,7 +238,7 @@
break;
case 't':
buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(base_addr)
+ reinterpret_cast<uintptr_t>(base_addr)
+ lir->offset + operand, lir->target);
break;
default:
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index c519bfe..6ec7ebb 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -117,6 +117,7 @@
switch (op) {
case kOpNeg: opcode = kX86Neg32R; break;
case kOpNot: opcode = kX86Not32R; break;
+ case kOpRev: opcode = kX86Bswap32R; break;
case kOpBlx: opcode = kX86CallR; break;
default:
LOG(FATAL) << "Bad case in OpReg " << op;
@@ -161,6 +162,13 @@
case kOpNeg:
OpRegCopy(r_dest_src1, r_src2);
return OpReg(kOpNeg, r_dest_src1);
+ case kOpRev:
+ OpRegCopy(r_dest_src1, r_src2);
+ return OpReg(kOpRev, r_dest_src1);
+ case kOpRevsh:
+ OpRegCopy(r_dest_src1, r_src2);
+ OpReg(kOpRev, r_dest_src1);
+ return OpRegImm(kOpAsr, r_dest_src1, 16);
// X86 binary opcodes
case kOpSub: opcode = kX86Sub32RR; break;
case kOpSbc: opcode = kX86Sbb32RR; break;
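The new kOpRev/kOpRevsh cases above lower a signed 16-bit byte swap on x86 as a full 32-bit BSWAP followed by an arithmetic shift right by 16: after the swap, the byte-reversed low half-word sits in the high half, and the shift drops it back down with sign extension. A tiny sketch checking that arithmetic in plain C++ (no ART types):

#include <cassert>
#include <cstdint>

// Illustrative model of the kOpRevsh lowering: byte-reverse the 32-bit value,
// then arithmetic-shift right by 16 (relies on the usual arithmetic behavior
// of >> on negative int32_t values).
static int32_t RevShortSignExtend(int32_t value) {
  uint32_t v = static_cast<uint32_t>(value);
  uint32_t swapped = (v << 24) | ((v << 8) & 0x00FF0000) |
                     ((v >> 8) & 0x0000FF00) | (v >> 24);  // BSWAP
  return static_cast<int32_t>(swapped) >> 16;              // ASR 16
}

int main() {
  // Low half-word 0xFF00 byte-swapped is 0x00FF.
  assert(RevShortSignExtend(0x0000FF00) == 0x000000FF);
  // Low half-word 0x12FF byte-swapped is 0xFF12, sign-extended to 0xFFFFFF12.
  assert(RevShortSignExtend(0x000012FF) == static_cast<int32_t>(0xFFFFFF12));
  return 0;
}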
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 643a3d5..3518131 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -243,7 +243,7 @@
// - lir operands - 0: base, 1: disp, 2: immediate
// AI - Array Immediate - opcode [base + index * scale + disp], #immediate
// - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
- // TI - Thread Register - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+ // TI - Thread Immediate - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
// - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
@@ -313,6 +313,7 @@
UnaryOpcode(kX86Imul, DaR, DaM, DaA),
UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+ kX86Bswap32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
opcode ## RR, opcode ## RM, opcode ## RA
@@ -381,6 +382,7 @@
kData, // Special case for raw data.
kNop, // Special case for variable length nop.
kNullary, // Opcode that takes no arguments.
+ kRegOpcode, // Shorter form of R instruction kind (opcode+rd)
kReg, kMem, kArray, // R, M and A instruction kinds.
kMemReg, kArrayReg, kThreadReg, // MR, AR and TR instruction kinds.
kRegReg, kRegMem, kRegArray, kRegThread, // RR, RM, RA and RT instruction kinds.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 0ca5fd4..b6c8922 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -38,18 +38,18 @@
}
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
- BasicBlock* res = NeedsVisit(bb->fall_through);
+ BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
if (res == NULL) {
- res = NeedsVisit(bb->taken);
+ res = NeedsVisit(GetBasicBlock(bb->taken));
if (res == NULL) {
- if (bb->successor_block_list.block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+ if (bb->successor_block_list_type != kNotUsed) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
while (true) {
SuccessorBlockInfo *sbi = iterator.Next();
if (sbi == NULL) {
break;
}
- res = NeedsVisit(sbi->block);
+ res = NeedsVisit(GetBasicBlock(sbi->block));
if (res != NULL) {
break;
}
@@ -63,7 +63,9 @@
void MIRGraph::MarkPreOrder(BasicBlock* block) {
block->visited = true;
/* Enqueue the pre_order block id */
- dfs_order_->Insert(block->id);
+ if (block->id != NullBasicBlockId) {
+ dfs_order_->Insert(block->id);
+ }
}
void MIRGraph::RecordDFSOrders(BasicBlock* block) {
@@ -79,7 +81,9 @@
continue;
}
curr->dfs_id = dfs_post_order_->Size();
- dfs_post_order_->Insert(curr->id);
+ if (curr->id != NullBasicBlockId) {
+ dfs_post_order_->Insert(curr->id);
+ }
succ.pop_back();
}
}
@@ -88,7 +92,8 @@
void MIRGraph::ComputeDFSOrders() {
/* Initialize or reset the DFS pre_order list */
if (dfs_order_ == NULL) {
- dfs_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsOrder);
+ dfs_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
+ kGrowableArrayDfsOrder);
} else {
/* Just reset the used length on the counter */
dfs_order_->Reset();
@@ -96,7 +101,8 @@
/* Initialize or reset the DFS post_order list */
if (dfs_post_order_ == NULL) {
- dfs_post_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsPostOrder);
+ dfs_post_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
+ kGrowableArrayDfsPostOrder);
} else {
/* Just reset the used length on the counter */
dfs_post_order_->Reset();
@@ -169,7 +175,7 @@
if (dom_post_order_traversal_ == NULL) {
// First time - create the array.
dom_post_order_traversal_ =
- new (arena_) GrowableArray<int>(arena_, num_reachable_blocks_,
+ new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
kGrowableArrayDomPostOrderTraversal);
} else {
dom_post_order_traversal_->Reset();
@@ -177,7 +183,7 @@
ClearAllVisitedFlags();
std::vector<std::pair<BasicBlock*, ArenaBitVector::Iterator*> > work_stack;
bb->visited = true;
- work_stack.push_back(std::make_pair(bb, new (arena_) ArenaBitVector::Iterator(bb->i_dominated)));
+ work_stack.push_back(std::make_pair(bb, bb->i_dominated->GetIterator()));
while (!work_stack.empty()) {
const std::pair<BasicBlock*, ArenaBitVector::Iterator*>& curr = work_stack.back();
BasicBlock* curr_bb = curr.first;
@@ -190,14 +196,16 @@
BasicBlock* new_bb = GetBasicBlock(bb_idx);
new_bb->visited = true;
work_stack.push_back(
- std::make_pair(new_bb, new (arena_) ArenaBitVector::Iterator(new_bb->i_dominated)));
+ std::make_pair(new_bb, new_bb->i_dominated->GetIterator()));
} else {
// no successor/next
- dom_post_order_traversal_->Insert(curr_bb->id);
+ if (curr_bb->id != NullBasicBlockId) {
+ dom_post_order_traversal_->Insert(curr_bb->id);
+ }
work_stack.pop_back();
/* hacky loop detection */
- if (curr_bb->taken && curr_bb->dominators->IsBitSet(curr_bb->taken->id)) {
+ if ((curr_bb->taken != NullBasicBlockId) && curr_bb->dominators->IsBitSet(curr_bb->taken)) {
attributes_ |= METHOD_HAS_LOOP;
}
}
@@ -210,7 +218,7 @@
* TODO - evaluate whether phi will ever need to be inserted into exit
* blocks.
*/
- if (succ_bb->i_dom != dom_bb &&
+ if (succ_bb->i_dom != dom_bb->id &&
succ_bb->block_type == kDalvikByteCode &&
succ_bb->hidden == false) {
dom_bb->dom_frontier->SetBit(succ_bb->id);
@@ -220,20 +228,20 @@
/* Worker function to compute the dominance frontier */
bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
/* Calculate DF_local */
- if (bb->taken) {
- CheckForDominanceFrontier(bb, bb->taken);
+ if (bb->taken != NullBasicBlockId) {
+ CheckForDominanceFrontier(bb, GetBasicBlock(bb->taken));
}
- if (bb->fall_through) {
- CheckForDominanceFrontier(bb, bb->fall_through);
+ if (bb->fall_through != NullBasicBlockId) {
+ CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
}
- if (bb->successor_block_list.block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+ if (bb->successor_block_list_type != kNotUsed) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
while (true) {
SuccessorBlockInfo *successor_block_info = iterator.Next();
if (successor_block_info == NULL) {
break;
}
- BasicBlock* succ_bb = successor_block_info->block;
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
CheckForDominanceFrontier(bb, succ_bb);
}
}
@@ -306,17 +314,17 @@
/* Worker function to compute each block's immediate dominator */
bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
/* Special-case entry block */
- if (bb == GetEntryBlock()) {
+ if ((bb->id == NullBasicBlockId) || (bb == GetEntryBlock())) {
return false;
}
/* Iterate through the predecessors */
- GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+ GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
/* Find the first processed predecessor */
int idom = -1;
while (true) {
- BasicBlock* pred_bb = iter.Next();
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
CHECK(pred_bb != NULL);
if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
idom = pred_bb->dfs_id;
@@ -326,7 +334,7 @@
/* Scan the rest of the predecessors */
while (true) {
- BasicBlock* pred_bb = iter.Next();
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) {
break;
}
@@ -352,7 +360,7 @@
if (bb == GetEntryBlock()) {
bb->dominators->ClearAllBits();
} else {
- bb->dominators->Copy(bb->i_dom->dominators);
+ bb->dominators->Copy(GetBasicBlock(bb->i_dom)->dominators);
}
bb->dominators->SetBit(bb->id);
return false;
@@ -364,7 +372,7 @@
DCHECK_NE(idom_dfs_idx, NOTVISITED);
int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
- bb->i_dom = i_dom;
+ bb->i_dom = i_dom->id;
/* Add bb to the i_dominated set of the immediate dominator block */
i_dom->i_dominated->SetBit(bb->id);
}
@@ -412,7 +420,7 @@
} else {
temp_block_v_->ClearAllBits();
}
- GetEntryBlock()->i_dom = NULL;
+ GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
@@ -463,20 +471,22 @@
return false;
}
temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
- if (bb->taken && bb->taken->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
+ BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+ BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
+ if (bb_taken && bb_taken->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
- if (bb->fall_through && bb->fall_through->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
+ if (bb_fall_through && bb_fall_through->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
- if (bb->successor_block_list.block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+ if (bb->successor_block_list_type != kNotUsed) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
while (true) {
SuccessorBlockInfo *successor_block_info = iterator.Next();
if (successor_block_info == NULL) {
break;
}
- BasicBlock* succ_bb = successor_block_info->block;
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
if (succ_bb->data_flow_info) {
ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
@@ -579,50 +589,37 @@
* predecessor blocks
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
- MIR *mir;
- std::vector<int> uses;
- std::vector<int> incoming_arc;
-
/* Phi nodes are at the beginning of each block */
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
DCHECK_GE(ssa_reg, 0); // Shouldn't see compiler temps here
int v_reg = SRegToVReg(ssa_reg);
- uses.clear();
- incoming_arc.clear();
-
/* Iterate through the predecessors */
- GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+ GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+ size_t num_uses = bb->predecessors->Size();
+ mir->ssa_rep->num_uses = num_uses;
+ int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+ ArenaAllocator::kAllocDFInfo));
+ mir->ssa_rep->uses = uses;
+ mir->ssa_rep->fp_use =
+ static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
+ BasicBlockId* incoming =
+ static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
+ ArenaAllocator::kAllocDFInfo));
+ mir->meta.phi_incoming = incoming;
+ int idx = 0;
while (true) {
- BasicBlock* pred_bb = iter.Next();
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) {
break;
}
int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
- uses.push_back(ssa_reg);
- incoming_arc.push_back(pred_bb->id);
- }
-
- /* Count the number of SSA registers for a Dalvik register */
- int num_uses = uses.size();
- mir->ssa_rep->num_uses = num_uses;
- mir->ssa_rep->uses =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
- mir->ssa_rep->fp_use =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
- int* incoming =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
- // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
- mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
-
- /* Set the uses array for the phi node */
- int *use_ptr = mir->ssa_rep->uses;
- for (int i = 0; i < num_uses; i++) {
- *use_ptr++ = uses[i];
- *incoming++ = incoming_arc[i];
+ uses[idx] = ssa_reg;
+ incoming[idx] = pred_bb->id;
+ idx++;
}
}
@@ -644,24 +641,24 @@
static_cast<int*>(arena_->Alloc(map_size, ArenaAllocator::kAllocDalvikToSSAMap));
memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
- if (block->fall_through) {
- DoDFSPreOrderSSARename(block->fall_through);
+ if (block->fall_through != NullBasicBlockId) {
+ DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
/* Restore SSA map snapshot */
memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
}
- if (block->taken) {
- DoDFSPreOrderSSARename(block->taken);
+ if (block->taken != NullBasicBlockId) {
+ DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
/* Restore SSA map snapshot */
memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
}
- if (block->successor_block_list.block_list_type != kNotUsed) {
- GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
+ if (block->successor_block_list_type != kNotUsed) {
+ GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_blocks);
while (true) {
SuccessorBlockInfo *successor_block_info = iterator.Next();
if (successor_block_info == NULL) {
break;
}
- BasicBlock* succ_bb = successor_block_info->block;
+ BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
DoDFSPreOrderSSARename(succ_bb);
/* Restore SSA map snapshot */
memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
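The rewritten InsertPhiNodeOperands above sizes the phi's uses, fp_use and incoming arrays directly from the predecessor count and fills them in a single pass over the predecessors, instead of staging values in std::vector and copying them afterwards. A compact sketch of that fill loop with ordinary containers (types and field names are illustrative, not the MIRGraph API):

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

typedef uint16_t BasicBlockId;

// Illustrative phi record: one SSA use and one incoming block id per predecessor.
struct Phi {
  std::vector<int> uses;
  std::vector<BasicBlockId> incoming;
};

// vreg_to_ssa maps each predecessor to its vreg -> SSA-name snapshot, standing
// in for pred_bb->data_flow_info->vreg_to_ssa_map in the real code.
static Phi BuildPhi(int v_reg, const std::vector<BasicBlockId>& preds,
                    const std::map<BasicBlockId, std::map<int, int> >& vreg_to_ssa) {
  Phi phi;
  phi.uses.reserve(preds.size());
  phi.incoming.reserve(preds.size());
  for (size_t i = 0; i < preds.size(); i++) {
    phi.uses.push_back(vreg_to_ssa.at(preds[i]).at(v_reg));  // SSA name flowing in
    phi.incoming.push_back(preds[i]);                        // matching predecessor
  }
  return phi;
}

int main() {
  std::map<BasicBlockId, std::map<int, int> > vreg_to_ssa;
  vreg_to_ssa[1][0] = 3;  // in block 1, v0 is SSA reg 3
  vreg_to_ssa[2][0] = 7;  // in block 2, v0 is SSA reg 7
  std::vector<BasicBlockId> preds;
  preds.push_back(1);
  preds.push_back(2);
  Phi phi = BuildPhi(0, preds, vreg_to_ssa);
  for (size_t i = 0; i < phi.uses.size(); i++) {
    std::printf("phi use s%d from block %d\n", phi.uses[i], static_cast<int>(phi.incoming[i]));
  }
  return 0;
}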
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 7c4a6ce..91b0188 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -355,7 +355,7 @@
jni_compiler_(NULL),
compiler_enable_auto_elf_loading_(NULL),
compiler_get_method_code_addr_(NULL),
- support_boot_image_fixup_(true),
+ support_boot_image_fixup_(instruction_set == kThumb2),
dedupe_code_("dedupe code"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
@@ -1058,10 +1058,12 @@
return false; // Incomplete knowledge needs slow path.
}
-void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
+ bool no_guarantee_of_dex_cache_entry,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
bool update_stats,
+ MethodReference* target_method,
uintptr_t* direct_code,
uintptr_t* direct_method) {
// For direct and static methods compute possible direct_code and direct_method values, ie
@@ -1070,46 +1072,103 @@
// invoked, so this can be passed to the out-of-line runtime support code.
*direct_code = 0;
*direct_method = 0;
+ bool use_dex_cache = false;
+ bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
if (compiler_backend_ == kPortable) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
}
+ use_dex_cache = true;
} else {
if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
return;
}
+ // TODO: support patching on all architectures.
+ use_dex_cache = compiling_boot && !support_boot_image_fixup_;
}
- bool method_code_in_boot = method->GetDeclaringClass()->GetClassLoader() == NULL;
- if (!method_code_in_boot) {
- return;
- }
- bool has_clinit_trampoline = method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
- if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
- // Ensure we run the clinit trampoline unless we are invoking a static method in the same class.
- return;
- }
- if (update_stats) {
- if (sharp_type != kInterface) { // Interfaces always go via a trampoline.
- stats_->DirectCallsToBoot(type);
- }
- stats_->DirectMethodsToBoot(type);
- }
- bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
- if (compiling_boot) {
- if (support_boot_image_fixup_) {
- MethodHelper mh(method);
- if (IsImageClass(mh.GetDeclaringClassDescriptorAsStringPiece())) {
- // We can only branch directly to Methods that are resolved in the DexCache.
- // Otherwise we won't invoke the resolution trampoline.
- *direct_method = -1;
- *direct_code = -1;
+ bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
+ if (!use_dex_cache) {
+ if (!method_code_in_boot) {
+ use_dex_cache = true;
+ } else {
+ bool has_clinit_trampoline =
+ method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
+ if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+ // Ensure we run the clinit trampoline unless we are invoking a static method in the same
+ // class.
+ use_dex_cache = true;
}
}
- } else {
- if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
- *direct_method = reinterpret_cast<uintptr_t>(method);
+ }
+ if (update_stats && method_code_in_boot) {
+ if (sharp_type != kInterface) { // Interfaces always go via a trampoline until we get IMTs.
+ stats_->DirectCallsToBoot(*type);
}
- *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+ stats_->DirectMethodsToBoot(*type);
+ }
+ if (!use_dex_cache && compiling_boot) {
+ MethodHelper mh(method);
+ if (!IsImageClass(mh.GetDeclaringClassDescriptorAsStringPiece())) {
+ // We can only branch directly to Methods that are resolved in the DexCache.
+ // Otherwise we won't invoke the resolution trampoline.
+ use_dex_cache = true;
+ }
+ }
+ // The method is not defined within this dex file. We need a dex cache slot within the current
+ // dex file or direct pointers.
+ bool must_use_direct_pointers = false;
+ if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
+ target_method->dex_method_index = method->GetDexMethodIndex();
+ } else {
+ // TODO: support patching from one dex file to another in the boot image.
+ use_dex_cache = use_dex_cache || compiling_boot;
+ if (no_guarantee_of_dex_cache_entry) {
+ // See if the method is also declared in this dex cache.
+ uint32_t dex_method_idx = MethodHelper(method).FindDexMethodIndexInOtherDexFile(
+ *referrer_class->GetDexCache()->GetDexFile());
+ if (dex_method_idx != DexFile::kDexNoIndex) {
+ target_method->dex_method_index = dex_method_idx;
+ } else {
+ must_use_direct_pointers = true;
+ }
+ }
+ }
+ if (use_dex_cache) {
+ if (must_use_direct_pointers) {
+ // Fail. The test above showed the only safe dispatch was via the dex cache; however, the direct
+ // pointers are required as the dex cache lacks an appropriate entry.
+ VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
+ } else {
+ *type = sharp_type;
+ }
+ } else {
+ if (compiling_boot) {
+ *type = sharp_type;
+ *direct_method = -1;
+ if (sharp_type != kInterface) {
+ *direct_code = -1;
+ }
+ } else {
+ bool method_in_image =
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+ if (method_in_image) {
+ CHECK_EQ(method->IsAbstract(), sharp_type == kInterface);
+ *type = sharp_type;
+ *direct_method = reinterpret_cast<uintptr_t>(method);
+ if (*type != kInterface) {
+ *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+ }
+ target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_method_index = method->GetDexMethodIndex();
+ } else if (!must_use_direct_pointers) {
+ // Set the code and rely on the dex cache for the method.
+ *type = sharp_type;
+ *direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
+ } else {
+ // Direct pointers were required but none were available.
+ VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
+ }
+ }
}
}
@@ -1126,6 +1185,9 @@
ComputeMethodReferencedFromCompilingMethod(soa, mUnit, target_method->dex_method_index,
*invoke_type);
if (resolved_method != NULL) {
+ if (*invoke_type == kVirtual || *invoke_type == kSuper) {
+ *vtable_idx = resolved_method->GetMethodIndex();
+ }
// Don't try to fast-path if we don't understand the caller's class or this appears to be an
// Incompatible Class Change Error.
mirror::Class* referrer_class =
@@ -1166,13 +1228,14 @@
// dex cache, check that this resolved method is where we expect it.
CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
- if (update_stats) {
- stats_->ResolvedMethod(*invoke_type);
- stats_->VirtualMadeDirect(*invoke_type);
+ InvokeType orig_invoke_type = *invoke_type;
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, false, referrer_class, resolved_method,
+ update_stats, target_method, direct_code, direct_method);
+ if (update_stats && (*invoke_type == kDirect)) {
+ stats_->ResolvedMethod(orig_invoke_type);
+ stats_->VirtualMadeDirect(orig_invoke_type);
}
- GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, resolved_method,
- update_stats, direct_code, direct_method);
- *invoke_type = kDirect;
+ DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method);
return true;
}
const bool enableVerifierBasedSharpening = enable_devirtualization;
@@ -1194,76 +1257,16 @@
kVirtual);
CHECK(called_method != NULL);
CHECK(!called_method->IsAbstract());
- GetCodeAndMethodForDirectCall(*invoke_type, kDirect, referrer_class, called_method,
- update_stats, direct_code, direct_method);
- bool compiler_needs_dex_cache =
- (GetCompilerBackend() == kPortable) ||
- (GetCompilerBackend() == kQuick && instruction_set_ != kThumb2) ||
- (*direct_code == 0) || (*direct_code == static_cast<unsigned int>(-1)) ||
- (*direct_method == 0) || (*direct_method == static_cast<unsigned int>(-1));
- if ((devirt_map_target->dex_file != target_method->dex_file) &&
- compiler_needs_dex_cache) {
- // We need to use the dex cache to find either the method or code, and the dex file
- // containing the method isn't the one expected for the target method. Try to find
- // the method within the expected target dex file.
- // TODO: the -1 could be handled as direct code if the patching new the target dex
- // file.
- // TODO: quick only supports direct pointers with Thumb2.
- // TODO: the following should be factored into a common helper routine to find
- // one dex file's method within another.
- const DexFile* dexfile = target_method->dex_file;
- const DexFile* cm_dexfile =
- called_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- const DexFile::MethodId& cm_method_id =
- cm_dexfile->GetMethodId(called_method->GetDexMethodIndex());
- const char* cm_descriptor = cm_dexfile->StringByTypeIdx(cm_method_id.class_idx_);
- const DexFile::StringId* descriptor = dexfile->FindStringId(cm_descriptor);
- if (descriptor != NULL) {
- const DexFile::TypeId* type_id =
- dexfile->FindTypeId(dexfile->GetIndexForStringId(*descriptor));
- if (type_id != NULL) {
- const char* cm_name = cm_dexfile->GetMethodName(cm_method_id);
- const DexFile::StringId* name = dexfile->FindStringId(cm_name);
- if (name != NULL) {
- uint16_t return_type_idx;
- std::vector<uint16_t> param_type_idxs;
- bool success =
- dexfile->CreateTypeList(cm_dexfile->GetMethodSignature(cm_method_id).ToString(),
- &return_type_idx, &param_type_idxs);
- if (success) {
- const DexFile::ProtoId* sig =
- dexfile->FindProtoId(return_type_idx, param_type_idxs);
- if (sig != NULL) {
- const DexFile::MethodId* method_id = dexfile->FindMethodId(*type_id,
- *name, *sig);
- if (method_id != NULL) {
- if (update_stats) {
- stats_->ResolvedMethod(*invoke_type);
- stats_->VirtualMadeDirect(*invoke_type);
- stats_->PreciseTypeDevirtualization();
- }
- target_method->dex_method_index =
- dexfile->GetIndexForMethodId(*method_id);
- *invoke_type = kDirect;
- return true;
- }
- }
- }
- }
- }
- }
- // TODO: the stats for direct code and method are off as we failed to find the direct
- // method in the referring method's dex cache/file.
- } else {
- if (update_stats) {
- stats_->ResolvedMethod(*invoke_type);
- stats_->VirtualMadeDirect(*invoke_type);
- stats_->PreciseTypeDevirtualization();
- }
- *target_method = *devirt_map_target;
- *invoke_type = kDirect;
- return true;
+ InvokeType orig_invoke_type = *invoke_type;
+ GetCodeAndMethodForDirectCall(invoke_type, kDirect, true, referrer_class, called_method,
+ update_stats, target_method, direct_code, direct_method);
+ if (update_stats && (*invoke_type == kDirect)) {
+ stats_->ResolvedMethod(orig_invoke_type);
+ stats_->VirtualMadeDirect(orig_invoke_type);
+ stats_->PreciseTypeDevirtualization();
}
+ DCHECK_NE(*invoke_type, kSuper);
+ return true;
}
}
if (*invoke_type == kSuper) {
@@ -1273,11 +1276,8 @@
if (update_stats) {
stats_->ResolvedMethod(*invoke_type);
}
- if (*invoke_type == kVirtual || *invoke_type == kSuper) {
- *vtable_idx = resolved_method->GetMethodIndex();
- }
- GetCodeAndMethodForDirectCall(*invoke_type, *invoke_type, referrer_class, resolved_method,
- update_stats, direct_code, direct_method);
+ GetCodeAndMethodForDirectCall(invoke_type, *invoke_type, false, referrer_class, resolved_method,
+ update_stats, target_method, direct_code, direct_method);
return true;
}
}
@@ -1747,6 +1747,7 @@
"Landroid/opengl/GLUtils;", // Calls android.opengl.GLUtils.nativeClassInit.
"Landroid/os/Build;", // Calls -..-> android.os.SystemProperties.native_get.
"Landroid/os/Build$VERSION;", // Requires Build.
+ "Landroid/os/Bundle;", // Calls android.os.Parcel.obtain -..> Parcel.nativeCreate.
"Landroid/os/Debug;", // Requires android.os.Environment.
"Landroid/os/Environment;", // Calls System.getenv.
"Landroid/os/FileUtils;", // Calls regex.Pattern.compile -..-> regex.Pattern.compileImpl.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 7657af5..971021f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -321,10 +321,12 @@
private:
// Compute constant code and method pointers when possible
- void GetCodeAndMethodForDirectCall(InvokeType type, InvokeType sharp_type,
+ void GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
+ bool no_guarantee_of_dex_cache_entry,
mirror::Class* referrer_class,
mirror::ArtMethod* method,
bool update_stats,
+ MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 359c493..c571288 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -27,8 +27,9 @@
static const bool DEBUG_FIXUP = false;
bool ElfFixup::Fixup(File* file, uintptr_t oat_data_begin) {
- UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false));
- CHECK(elf_file.get() != NULL);
+ std::string error_msg;
+ UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false, &error_msg));
+ CHECK(elf_file.get() != nullptr) << error_msg;
// Lookup "oatdata" symbol address.
::llvm::ELF::Elf32_Addr oatdata_address = ElfWriter::GetOatDataAddress(elf_file.get());
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
index 7fc662c..7ee8d3c 100644
--- a/compiler/elf_stripper.cc
+++ b/compiler/elf_stripper.cc
@@ -27,9 +27,11 @@
namespace art {
-bool ElfStripper::Strip(File* file) {
- UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false));
- CHECK(elf_file.get() != NULL);
+bool ElfStripper::Strip(File* file, std::string* error_msg) {
+ UniquePtr<ElfFile> elf_file(ElfFile::Open(file, true, false, error_msg));
+ if (elf_file.get() == nullptr) {
+ return false;
+ }
// ELF files produced by MCLinker look roughly like this
//
@@ -120,7 +122,8 @@
elf_file->GetHeader().e_shoff = shoff;
int result = ftruncate(file->Fd(), offset);
if (result != 0) {
- PLOG(ERROR) << "Failed to truncate while stripping ELF file: " << file->GetPath();
+ *error_msg = StringPrintf("Failed to truncate while stripping ELF file: '%s': %s",
+ file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
diff --git a/compiler/elf_stripper.h b/compiler/elf_stripper.h
index 6015b30..f1a1d46 100644
--- a/compiler/elf_stripper.h
+++ b/compiler/elf_stripper.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_ELF_STRIPPER_H_
#define ART_COMPILER_ELF_STRIPPER_H_
+#include <string>
+
#include "base/macros.h"
#include "os.h"
@@ -26,7 +28,7 @@
public:
// Strip an ELF file of unneeded debugging information.
// Returns true on success, false on failure.
- static bool Strip(File* file);
+ static bool Strip(File* file, std::string* error_msg);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfStripper);
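ElfFile::Open and ElfStripper::Strip now surface failures through a std::string out-parameter instead of CHECK-ing internally. A hedged sketch of a caller using the new Strip() signature; the wrapper function, its logging, and the include paths are illustrative, not part of this patch:

#include <string>

#include "base/logging.h"
#include "elf_stripper.h"
#include "os.h"

namespace art {

// Illustrative caller; dex2oat (further down in this patch) does essentially
// this, but wraps the call in a CHECK instead of logging and returning.
bool StripOatForTarget(File* oat_file) {
  std::string error_msg;
  if (!ElfStripper::Strip(oat_file, &error_msg)) {
    LOG(ERROR) << "Failed to strip '" << oat_file->GetPath() << "': " << error_msg;
    return false;
  }
  return true;
}

}  // namespace art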
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index d3c13dd..0bfe4a4 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -47,8 +47,9 @@
void ElfWriter::GetOatElfInformation(File* file,
size_t& oat_loaded_size,
size_t& oat_data_offset) {
- UniquePtr<ElfFile> elf_file(ElfFile::Open(file, false, false));
- CHECK(elf_file.get() != NULL);
+ std::string error_msg;
+ UniquePtr<ElfFile> elf_file(ElfFile::Open(file, false, false, &error_msg));
+ CHECK(elf_file.get() != NULL) << error_msg;
oat_loaded_size = elf_file->GetLoadedSize();
CHECK_NE(0U, oat_loaded_size);
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index e496ace..8e19ef6 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -153,8 +153,9 @@
void ElfWriterMclinker::AddOatInput(std::vector<uint8_t>& oat_contents) {
// Add an artificial memory input. Based on LinkerTest.
- UniquePtr<OatFile> oat_file(OatFile::OpenMemory(oat_contents, elf_file_->GetPath()));
- CHECK(oat_file.get() != NULL) << elf_file_->GetPath();
+ std::string error_msg;
+ UniquePtr<OatFile> oat_file(OatFile::OpenMemory(oat_contents, elf_file_->GetPath(), &error_msg));
+ CHECK(oat_file.get() != NULL) << elf_file_->GetPath() << ": " << error_msg;
const char* oat_data_start = reinterpret_cast<const char*>(&oat_file->GetOatHeader());
const size_t oat_data_length = oat_file->GetOatHeader().GetExecutableOffset();
@@ -344,8 +345,9 @@
#if defined(ART_USE_PORTABLE_COMPILER)
void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files) {
- UniquePtr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false));
- CHECK(elf_file.get() != NULL) << elf_file_->GetPath();
+ std::string error_msg;
+ UniquePtr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false, &error_msg));
+ CHECK(elf_file.get() != NULL) << elf_file_->GetPath() << ": " << error_msg;
llvm::ELF::Elf32_Addr oatdata_address = GetOatDataAddress(elf_file.get());
DexMethodIterator it(dex_files);
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index ffe1f72..eca67a8 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -65,23 +65,26 @@
UniquePtr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
ASSERT_TRUE(file.get() != NULL);
{
- UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false));
- CHECK(ef.get() != NULL);
+ std::string error_msg;
+ UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
+ CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", false);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", false);
}
{
- UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false));
- CHECK(ef.get() != NULL);
+ std::string error_msg;
+ UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
+ CHECK(ef.get() != nullptr) << error_msg;
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", true);
EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true);
}
{
- UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, true));
- CHECK(ef.get() != NULL);
- ef->Load(false);
+ std::string error_msg;
+ UniquePtr<ElfFile> ef(ElfFile::Open(file.get(), false, true, &error_msg));
+ CHECK(ef.get() != nullptr) << error_msg;
+ CHECK(ef->Load(false, &error_msg)) << error_msg;
EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index d4be7c0..a8b7c88 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -112,8 +112,11 @@
runtime_.reset();
java_lang_dex_file_ = NULL;
- UniquePtr<const DexFile> dex(DexFile::Open(GetLibCoreDexFileName(), GetLibCoreDexFileName()));
- ASSERT_TRUE(dex.get() != NULL);
+ std::string error_msg;
+ UniquePtr<const DexFile> dex(DexFile::Open(GetLibCoreDexFileName().c_str(),
+ GetLibCoreDexFileName().c_str(),
+ &error_msg));
+ ASSERT_TRUE(dex.get() != nullptr) << error_msg;
// Remove the reservation of the memory for use to load the image.
UnreserveImageSpace();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index bcdc1c1..871cfd5 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -82,12 +82,14 @@
LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
- oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location);
- if (oat_file_ == NULL) {
- LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location;
+ std::string error_msg;
+ oat_file_ = OatFile::OpenWritable(oat_file.get(), oat_location, &error_msg);
+ if (oat_file_ == nullptr) {
+ LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
+ << ": " << error_msg;
return false;
}
- class_linker->RegisterOatFile(*oat_file_);
+ CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);
interpreter_to_interpreter_bridge_offset_ =
oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
@@ -192,9 +194,10 @@
int prot = PROT_READ | PROT_WRITE;
size_t length = RoundUp(size, kPageSize);
- image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, prot));
- if (image_.get() == NULL) {
- LOG(ERROR) << "Failed to allocate memory for image file generation";
+ std::string error_msg;
+ image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, prot, &error_msg));
+ if (UNLIKELY(image_.get() == nullptr)) {
+ LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
}
return true;
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 6ac5d6a..af86743 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -28,6 +28,8 @@
class OatTest : public CommonTest {
protected:
+ static const bool kCompile = false; // DISABLED_ due to the time to compile libcore
+
void CheckMethod(mirror::ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile* dex_file)
@@ -40,7 +42,7 @@
EXPECT_TRUE(oat_method.GetCode() == NULL) << PrettyMethod(method) << " "
<< oat_method.GetCode();
#if !defined(ART_USE_PORTABLE_COMPILER)
- EXPECT_EQ(oat_method.GetFrameSizeInBytes(), static_cast<uint32_t>(kStackAlignment));
+ EXPECT_EQ(oat_method.GetFrameSizeInBytes(), kCompile ? kStackAlignment : 0);
EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U);
EXPECT_EQ(oat_method.GetFpSpillMask(), 0U);
#endif
@@ -65,7 +67,6 @@
};
TEST_F(OatTest, WriteRead) {
- const bool compile = false; // DISABLED_ due to the time to compile libcore
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: make selectable
@@ -77,7 +78,7 @@
InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
compiler_driver_.reset(new CompilerDriver(compiler_backend, insn_set, false, NULL, 2, true));
jobject class_loader = NULL;
- if (compile) {
+ if (kCompile) {
base::TimingLogger timings("OatTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
@@ -96,12 +97,14 @@
tmp.GetFile());
ASSERT_TRUE(success);
- if (compile) { // OatWriter strips the code, regenerate to compare
+ if (kCompile) { // OatWriter strips the code, regenerate to compare
base::TimingLogger timings("CommonTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
- UniquePtr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false));
- ASSERT_TRUE(oat_file.get() != NULL);
+ std::string error_msg;
+ UniquePtr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false,
+ &error_msg));
+ ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
ASSERT_EQ(1U, oat_header.GetDexFileCount()); // core
@@ -111,22 +114,25 @@
const DexFile* dex_file = java_lang_dex_file_;
uint32_t dex_file_checksum = dex_file->GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file->GetLocation(),
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file->GetLocation().c_str(),
&dex_file_checksum);
+ ASSERT_TRUE(oat_dex_file != nullptr);
CHECK_EQ(dex_file->GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
for (size_t i = 0; i < dex_file->NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
const byte* class_data = dex_file->GetClassData(class_def);
- size_t num_virtual_methods =0;
+ size_t num_virtual_methods = 0;
if (class_data != NULL) {
ClassDataItemIterator it(*dex_file, class_data);
num_virtual_methods = it.NumVirtualMethods();
}
const char* descriptor = dex_file->GetClassDescriptor(class_def);
+ mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(i));
-
- mirror::Class* klass = class_linker->FindClass(descriptor, NULL);
+ CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class->GetStatus()) << descriptor;
+ CHECK_EQ(kCompile ? OatClassType::kOatClassAllCompiled : OatClassType::kOatClassNoneCompiled,
+ oat_class->GetType()) << descriptor;
size_t method_index = 0;
for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index f23b72b..f681d7d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -18,6 +18,7 @@
#include <zlib.h>
+#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -70,7 +71,9 @@
size_oat_dex_file_location_checksum_(0),
size_oat_dex_file_offset_(0),
size_oat_dex_file_methods_offsets_(0),
+ size_oat_class_type_(0),
size_oat_class_status_(0),
+ size_oat_class_method_bitmaps_(0),
size_oat_class_method_offsets_(0) {
size_t offset = InitOatHeader();
offset = InitOatDexFiles(offset);
@@ -142,12 +145,48 @@
oat_dex_files_[i]->methods_offsets_[class_def_index] = offset;
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const byte* class_data = dex_file->GetClassData(class_def);
- uint32_t num_methods = 0;
+ uint32_t num_non_null_compiled_methods = 0;
+ UniquePtr<std::vector<CompiledMethod*> > compiled_methods(new std::vector<CompiledMethod*>());
if (class_data != NULL) { // ie not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
size_t num_direct_methods = it.NumDirectMethods();
size_t num_virtual_methods = it.NumVirtualMethods();
- num_methods = num_direct_methods + num_virtual_methods;
+ size_t num_methods = num_direct_methods + num_virtual_methods;
+
+ // Fill in the compiled_methods_ array for methods that have a
+ // CompiledMethod. We track the number of non-null entries in
+ // num_non_null_compiled_methods since we only want to allocate
+ // OatMethodOffsets for the compiled methods.
+ compiled_methods->reserve(num_methods);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ size_t class_def_method_index = 0;
+ while (it.HasNextDirectMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+ compiled_methods->push_back(compiled_method);
+ if (compiled_method != NULL) {
+ num_non_null_compiled_methods++;
+ }
+ class_def_method_index++;
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+ compiled_methods->push_back(compiled_method);
+ if (compiled_method != NULL) {
+ num_non_null_compiled_methods++;
+ }
+ class_def_method_index++;
+ it.Next();
+ }
}
ClassReference class_ref(dex_file, class_def_index);
@@ -161,7 +200,8 @@
status = mirror::Class::kStatusNotReady;
}
- OatClass* oat_class = new OatClass(offset, status, num_methods);
+ OatClass* oat_class = new OatClass(offset, compiled_methods.release(),
+ num_non_null_compiled_methods, status);
oat_classes_.push_back(oat_class);
offset += oat_class->SizeOf();
}
@@ -212,20 +252,20 @@
for (size_t i = 0; i != dex_files_->size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
CHECK(dex_file != NULL);
- offset = InitOatCodeDexFile(offset, oat_class_index, *dex_file);
+ offset = InitOatCodeDexFile(offset, &oat_class_index, *dex_file);
}
return offset;
}
size_t OatWriter::InitOatCodeDexFile(size_t offset,
- size_t& oat_class_index,
+ size_t* oat_class_index,
const DexFile& dex_file) {
for (size_t class_def_index = 0;
class_def_index < dex_file.NumClassDefs();
- class_def_index++, oat_class_index++) {
+ class_def_index++, (*oat_class_index)++) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- offset = InitOatCodeClassDef(offset, oat_class_index, class_def_index, dex_file, class_def);
- oat_classes_[oat_class_index]->UpdateChecksum(*oat_header_);
+ offset = InitOatCodeClassDef(offset, *oat_class_index, class_def_index, dex_file, class_def);
+ oat_classes_[*oat_class_index]->UpdateChecksum(*oat_header_);
}
return offset;
}
@@ -240,7 +280,7 @@
return offset;
}
ClassDataItemIterator it(dex_file, class_data);
- CHECK_EQ(oat_classes_[oat_class_index]->method_offsets_.size(),
+ CHECK_LE(oat_classes_[oat_class_index]->method_offsets_.size(),
it.NumDirectMethods() + it.NumVirtualMethods());
// Skip fields
while (it.HasNextStaticField()) {
@@ -251,32 +291,35 @@
}
// Process methods
size_t class_def_method_index = 0;
+ size_t method_offsets_index = 0;
while (it.HasNextDirectMethod()) {
bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
- is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
- &dex_file);
+ &method_offsets_index, is_native,
+ it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
class_def_method_index++;
it.Next();
}
while (it.HasNextVirtualMethod()) {
bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
- is_native, it.GetMethodInvokeType(class_def), it.GetMemberIndex(),
- &dex_file);
+ &method_offsets_index, is_native,
+ it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
class_def_method_index++;
it.Next();
}
DCHECK(!it.HasNext());
+ CHECK_LE(method_offsets_index, class_def_method_index);
return offset;
}
size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
size_t __attribute__((unused)) class_def_index,
size_t class_def_method_index,
+ size_t* method_offsets_index,
bool __attribute__((unused)) is_native,
InvokeType invoke_type,
- uint32_t method_idx, const DexFile* dex_file) {
+ uint32_t method_idx, const DexFile& dex_file) {
// derived from CompiledMethod if available
uint32_t code_offset = 0;
uint32_t frame_size_in_bytes = kStackAlignment;
@@ -292,8 +335,7 @@
oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
#endif
- CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
+ CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
if (compiled_method != NULL) {
#if defined(ART_USE_PORTABLE_COMPILER)
compiled_method->AddOatdataOffsetToCompliledCodeOffset(
@@ -358,7 +400,7 @@
#if !defined(NDEBUG)
// We expect GC maps except when the class hasn't been verified or the method is native
- ClassReference class_ref(dex_file, class_def_index);
+ ClassReference class_ref(&dex_file, class_def_index);
CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
mirror::Class::Status status;
if (compiled_class != NULL) {
@@ -371,7 +413,7 @@
CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
<< &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
<< (status < mirror::Class::kStatusVerified) << " " << status << " "
- << PrettyMethod(method_idx, *dex_file);
+ << PrettyMethod(method_idx, dex_file);
#endif
// Deduplicate GC maps
@@ -384,24 +426,26 @@
offset += gc_map_size;
oat_header_->UpdateChecksum(&gc_map[0], gc_map_size);
}
+
+ oat_class->method_offsets_[*method_offsets_index] =
+ OatMethodOffsets(code_offset,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ mapping_table_offset,
+ vmap_table_offset,
+ gc_map_offset);
+ (*method_offsets_index)++;
}
- oat_class->method_offsets_[class_def_method_index] =
- OatMethodOffsets(code_offset,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
- mapping_table_offset,
- vmap_table_offset,
- gc_map_offset);
if (compiler_driver_->IsImage()) {
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
+ mirror::DexCache* dex_cache = linker->FindDexCache(dex_file);
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- mirror::ArtMethod* method = linker->ResolveMethod(*dex_file, method_idx, dex_cache,
- NULL, NULL, invoke_type);
+ mirror::ArtMethod* method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
+ NULL, NULL, invoke_type);
CHECK(method != NULL);
method->SetFrameSizeInBytes(frame_size_in_bytes);
method->SetCoreSpillMask(core_spill_mask);
@@ -491,7 +535,9 @@
DO_STAT(size_oat_dex_file_location_checksum_);
DO_STAT(size_oat_dex_file_offset_);
DO_STAT(size_oat_dex_file_methods_offsets_);
+ DO_STAT(size_oat_class_type_);
DO_STAT(size_oat_class_status_);
+ DO_STAT(size_oat_class_method_bitmaps_);
DO_STAT(size_oat_class_method_offsets_);
#undef DO_STAT
@@ -586,7 +632,7 @@
for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
CHECK(dex_file != NULL);
- relative_offset = WriteCodeDexFile(out, file_offset, relative_offset, oat_class_index,
+ relative_offset = WriteCodeDexFile(out, file_offset, relative_offset, &oat_class_index,
*dex_file);
if (relative_offset == 0) {
return 0;
@@ -596,12 +642,12 @@
}
size_t OatWriter::WriteCodeDexFile(OutputStream& out, const size_t file_offset,
- size_t relative_offset, size_t& oat_class_index,
+ size_t relative_offset, size_t* oat_class_index,
const DexFile& dex_file) {
for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
- class_def_index++, oat_class_index++) {
+ class_def_index++, (*oat_class_index)++) {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, oat_class_index,
+ relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, *oat_class_index,
dex_file, class_def);
if (relative_offset == 0) {
return 0;
@@ -637,11 +683,12 @@
}
// Process methods
size_t class_def_method_index = 0;
+ size_t method_offsets_index = 0;
while (it.HasNextDirectMethod()) {
bool is_static = (it.GetMemberAccessFlags() & kAccStatic) != 0;
relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
- class_def_method_index, is_static, it.GetMemberIndex(),
- dex_file);
+ class_def_method_index, &method_offsets_index, is_static,
+ it.GetMemberIndex(), dex_file);
if (relative_offset == 0) {
return 0;
}
@@ -650,28 +697,30 @@
}
while (it.HasNextVirtualMethod()) {
relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
- class_def_method_index, false, it.GetMemberIndex(), dex_file);
+ class_def_method_index, &method_offsets_index, false,
+ it.GetMemberIndex(), dex_file);
if (relative_offset == 0) {
return 0;
}
class_def_method_index++;
it.Next();
}
+ DCHECK(!it.HasNext());
+ CHECK_LE(method_offsets_index, class_def_method_index);
return relative_offset;
}
size_t OatWriter::WriteCodeMethod(OutputStream& out, const size_t file_offset,
size_t relative_offset, size_t oat_class_index,
- size_t class_def_method_index, bool is_static,
- uint32_t method_idx, const DexFile& dex_file) {
- const CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
-
- const OatMethodOffsets& method_offsets =
- oat_classes_[oat_class_index]->method_offsets_[class_def_method_index];
-
+ size_t class_def_method_index, size_t* method_offsets_index,
+ bool is_static, uint32_t method_idx, const DexFile& dex_file) {
+ OatClass* oat_class = oat_classes_[oat_class_index];
+ const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
if (compiled_method != NULL) { // ie. not an abstract method
+ const OatMethodOffsets method_offsets = oat_class->method_offsets_[*method_offsets_index];
+ (*method_offsets_index)++;
+
#if !defined(ART_USE_PORTABLE_COMPILER)
uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
uint32_t aligned_code_delta = aligned_offset - relative_offset;
@@ -854,29 +903,96 @@
return true;
}
-OatWriter::OatClass::OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count) {
+OatWriter::OatClass::OatClass(size_t offset,
+ std::vector<CompiledMethod*>* compiled_methods,
+ uint32_t num_non_null_compiled_methods,
+ mirror::Class::Status status) {
+ CHECK(compiled_methods != NULL);
+ uint32_t num_methods = compiled_methods->size();
+ CHECK_LE(num_non_null_compiled_methods, num_methods);
+
offset_ = offset;
+ compiled_methods_ = compiled_methods;
+ oat_method_offsets_offsets_from_oat_class_.resize(num_methods);
+
+ // Since both kOatClassNoneCompiled and kOatClassAllCompiled could
+ // apply when there are 0 methods, we just arbitrarily say that 0
+ // methods means kOatClassNoneCompiled and that we won't use
+ // kOatClassAllCompiled unless there is at least one compiled
+ // method. This means that in an interpreter-only system, we can
+ // assert that all classes are kOatClassNoneCompiled.
+ if (num_non_null_compiled_methods == 0) {
+ type_ = kOatClassNoneCompiled;
+ } else if (num_non_null_compiled_methods == num_methods) {
+ type_ = kOatClassAllCompiled;
+ } else {
+ type_ = kOatClassSomeCompiled;
+ }
+
status_ = status;
- method_offsets_.resize(methods_count);
+ method_offsets_.resize(num_non_null_compiled_methods);
+
+ uint32_t oat_method_offsets_offset_from_oat_class = sizeof(type_) + sizeof(status_);
+ if (type_ == kOatClassSomeCompiled) {
+ method_bitmap_ = new BitVector(num_methods, false, Allocator::GetMallocAllocator());
+ method_bitmap_size_ = method_bitmap_->GetSizeOf();
+ oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
+ oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
+ } else {
+ method_bitmap_ = NULL;
+ method_bitmap_size_ = 0;
+ }
+
+ for (size_t i = 0; i < num_methods; i++) {
+ CompiledMethod* compiled_method = (*compiled_methods_)[i];
+ if (compiled_method == NULL) {
+ oat_method_offsets_offsets_from_oat_class_[i] = 0;
+ } else {
+ oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
+ oat_method_offsets_offset_from_oat_class += sizeof(OatMethodOffsets);
+ if (type_ == kOatClassSomeCompiled) {
+ method_bitmap_->SetBit(i);
+ }
+ }
+ }
}
+OatWriter::OatClass::~OatClass() {
+ delete compiled_methods_;
+}
+
+#if defined(ART_USE_PORTABLE_COMPILER)
size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
size_t class_def_method_index_) const {
- return offset_ + GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
+ uint32_t method_offset = GetOatMethodOffsetsOffsetFromOatClass(class_def_method_index_);
+ if (method_offset == 0) {
+ return 0;
+ }
+ return offset_ + method_offset;
}
size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatClass(
size_t class_def_method_index_) const {
- return sizeof(status_)
- + (sizeof(method_offsets_[0]) * class_def_method_index_);
+ return oat_method_offsets_offsets_from_oat_class_[class_def_method_index_];
}
+#endif
size_t OatWriter::OatClass::SizeOf() const {
- return GetOatMethodOffsetsOffsetFromOatClass(method_offsets_.size());
+ return sizeof(status_)
+ + sizeof(type_)
+ + ((method_bitmap_size_ == 0) ? 0 : sizeof(method_bitmap_size_))
+ + method_bitmap_size_
+ + (sizeof(method_offsets_[0]) * method_offsets_.size());
}
void OatWriter::OatClass::UpdateChecksum(OatHeader& oat_header) const {
oat_header.UpdateChecksum(&status_, sizeof(status_));
+ oat_header.UpdateChecksum(&type_, sizeof(type_));
+ if (method_bitmap_size_ != 0) {
+ CHECK_EQ(kOatClassSomeCompiled, type_);
+ oat_header.UpdateChecksum(&method_bitmap_size_, sizeof(method_bitmap_size_));
+ oat_header.UpdateChecksum(method_bitmap_->GetRawStorage(), method_bitmap_size_);
+ }
oat_header.UpdateChecksum(&method_offsets_[0],
sizeof(method_offsets_[0]) * method_offsets_.size());
}
@@ -890,17 +1006,30 @@
return false;
}
oat_writer->size_oat_class_status_ += sizeof(status_);
- DCHECK_EQ(static_cast<off_t>(file_offset + GetOatMethodOffsetsOffsetFromOatHeader(0)),
- out.Seek(0, kSeekCurrent));
+ if (!out.WriteFully(&type_, sizeof(type_))) {
+ PLOG(ERROR) << "Failed to write oat class type to " << out.GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_class_type_ += sizeof(type_);
+ if (method_bitmap_size_ != 0) {
+ CHECK_EQ(kOatClassSomeCompiled, type_);
+ if (!out.WriteFully(&method_bitmap_size_, sizeof(method_bitmap_size_))) {
+ PLOG(ERROR) << "Failed to write method bitmap size to " << out.GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_class_method_bitmaps_ += sizeof(method_bitmap_size_);
+ if (!out.WriteFully(method_bitmap_->GetRawStorage(), method_bitmap_size_)) {
+ PLOG(ERROR) << "Failed to write method bitmap to " << out.GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_class_method_bitmaps_ += method_bitmap_size_;
+ }
if (!out.WriteFully(&method_offsets_[0],
sizeof(method_offsets_[0]) * method_offsets_.size())) {
PLOG(ERROR) << "Failed to write method offsets to " << out.GetLocation();
return false;
}
oat_writer->size_oat_class_method_offsets_ += sizeof(method_offsets_[0]) * method_offsets_.size();
- DCHECK_EQ(static_cast<off_t>(file_offset +
- GetOatMethodOffsetsOffsetFromOatHeader(method_offsets_.size())),
- out.Seek(0, kSeekCurrent));
return true;
}
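The OatClass constructor above classifies each class by how many of its methods have a CompiledMethod. A standalone sketch of that rule, assuming the OatClassType values that oat.h appears to introduce in this change (treat the numeric values and the helper name as assumptions):

#include <cstddef>
#include <cstdint>

// Assumed enum values; the authoritative definition lives in runtime/oat.h.
enum OatClassType : uint16_t {
  kOatClassAllCompiled = 0,   // OatMethodOffsets for every method.
  kOatClassSomeCompiled = 1,  // Bitmap selects which methods have offsets.
  kOatClassNoneCompiled = 2,  // No OatMethodOffsets at all.
};

// Hypothetical free-function restatement of the constructor's choice.
OatClassType ClassifyOatClass(size_t num_methods, size_t num_compiled) {
  if (num_compiled == 0) {
    // Zero methods is arbitrarily counted as "none compiled", so an
    // interpreter-only build produces only kOatClassNoneCompiled entries.
    return kOatClassNoneCompiled;
  }
  return (num_compiled == num_methods) ? kOatClassAllCompiled
                                       : kOatClassSomeCompiled;
}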
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index d5f7e21..e3cb0a8 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -30,6 +30,7 @@
namespace art {
+class BitVector;
class OutputStream;
// OatHeader variable length with count of D OatDexFiles
@@ -90,7 +91,7 @@
size_t InitOatCodeDexFiles(size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeDexFile(size_t offset,
- size_t& oat_class_index,
+ size_t* oat_class_index,
const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeClassDef(size_t offset,
@@ -99,21 +100,22 @@
const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index,
- size_t class_def_method_index, bool is_native, InvokeType type,
- uint32_t method_idx, const DexFile*)
+ size_t class_def_method_index, size_t* method_offsets_index,
+ bool is_native, InvokeType type, uint32_t method_idx, const DexFile&)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool WriteTables(OutputStream& out, const size_t file_offset);
size_t WriteCode(OutputStream& out, const size_t file_offset);
size_t WriteCodeDexFiles(OutputStream& out, const size_t file_offset, size_t relative_offset);
size_t WriteCodeDexFile(OutputStream& out, const size_t file_offset, size_t relative_offset,
- size_t& oat_class_index, const DexFile& dex_file);
+ size_t* oat_class_index, const DexFile& dex_file);
size_t WriteCodeClassDef(OutputStream& out, const size_t file_offset, size_t relative_offset,
size_t oat_class_index, const DexFile& dex_file,
const DexFile::ClassDef& class_def);
size_t WriteCodeMethod(OutputStream& out, const size_t file_offset, size_t relative_offset,
- size_t oat_class_index, size_t class_def_method_index, bool is_static,
- uint32_t method_idx, const DexFile& dex_file);
+ size_t oat_class_index, size_t class_def_method_index,
+ size_t* method_offsets_index, bool is_static, uint32_t method_idx,
+ const DexFile& dex_file);
void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
OutputStream& out) const;
@@ -142,13 +144,24 @@
class OatClass {
public:
- explicit OatClass(size_t offset, mirror::Class::Status status, uint32_t methods_count);
+ explicit OatClass(size_t offset,
+ std::vector<CompiledMethod*>* compiled_methods,
+ uint32_t num_non_null_compiled_methods,
+ mirror::Class::Status status);
+ ~OatClass();
+#if defined(ART_USE_PORTABLE_COMPILER)
size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
+#endif
size_t SizeOf() const;
void UpdateChecksum(OatHeader& oat_header) const;
bool Write(OatWriter* oat_writer, OutputStream& out, const size_t file_offset) const;
+ CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
+ DCHECK(compiled_methods_ != NULL);
+ return (*compiled_methods_)[class_def_method_index];
+ }
+
// Offset of start of OatClass from beginning of OatHeader. It is
// used to validate file position when writing. For Portable, it
// is also used to calculate the position of the OatMethodOffsets
@@ -156,8 +169,37 @@
// patched to point to code in the Portable .o ELF objects.
size_t offset_;
+ // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+ std::vector<CompiledMethod*>* compiled_methods_;
+
+ // Offset from OatClass::offset_ to the OatMethodOffsets for the
+ // class_def_method_index. If 0, it means the corresponding
+ // CompiledMethod entry in OatClass::compiled_methods_ should be
+ // NULL and that the OatClass::type_ should be kOatClassBitmap.
+ std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
+
// data to write
- mirror::Class::Status status_;
+
+ COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (1 << 16), class_status_wont_fit_in_16bits);
+ int16_t status_;
+
+ COMPILE_ASSERT(OatClassType::kOatClassMax < (1 << 16), oat_class_type_wont_fit_in_16bits);
+ uint16_t type_;
+
+ uint32_t method_bitmap_size_;
+
+ // Bit vector indexed by ClassDef method index. When
+ // OatClassType::type_ is kOatClassBitmap, a set bit indicates the
+ // method has an OatMethodOffsets in method_offsets_, otherwise
+ // the entry was omitted to save space. If OatClassType::type_ is
+ // not kOatClassBitmap, the bitmap will be NULL.
+ BitVector* method_bitmap_;
+
+ // OatMethodOffsets for each CompiledMethod present in the
+ // OatClass. Note that some may be missing if
+ // OatClass::compiled_methods_ contains NULL values (and
+ // oat_method_offsets_offsets_from_oat_class_ should contain 0
+ // values in this case).
std::vector<OatMethodOffsets> method_offsets_;
private:
@@ -214,7 +256,9 @@
uint32_t size_oat_dex_file_location_checksum_;
uint32_t size_oat_dex_file_offset_;
uint32_t size_oat_dex_file_methods_offsets_;
+ uint32_t size_oat_class_type_;
uint32_t size_oat_class_status_;
+ uint32_t size_oat_class_method_bitmaps_;
uint32_t size_oat_class_method_offsets_;
// Code mappings for deduplication. Deduplication is already done on a pointer basis by the
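Putting the new fields together, each serialized OatClass now starts with status_ and type_, optionally followed by the method bitmap, then the packed OatMethodOffsets. A hedged mirror of SizeOf() above, written as a standalone helper with illustrative names:

#include <cstddef>
#include <cstdint>

// Mirrors OatWriter::OatClass::SizeOf() above: the bitmap length word and the
// bitmap itself are only present for kOatClassSomeCompiled classes.
size_t OatClassSerializedSize(uint32_t method_bitmap_size,
                              size_t num_compiled_methods,
                              size_t sizeof_oat_method_offsets) {
  return sizeof(int16_t)                                       // status_
         + sizeof(uint16_t)                                    // type_
         + ((method_bitmap_size == 0) ? 0 : sizeof(uint32_t))  // bitmap size word
         + method_bitmap_size                                  // bitmap storage
         + sizeof_oat_method_offsets * num_compiled_methods;   // OatMethodOffsets
}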
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index 53c1afa..638e0ec 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -22,6 +22,7 @@
#include "base/mutex.h"
#include "base/stl_util.h"
+#include "base/stringprintf.h"
namespace art {
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 03d8b96..8abe6de 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -14,15 +14,12 @@
* limitations under the License.
*/
-#include "common_test.h"
#include "dedupe_set.h"
+#include "gtest/gtest.h"
+#include "thread-inl.h"
namespace art {
-class DedupeSetTest : public testing::Test {
- public:
-};
-
class DedupeHashFunc {
public:
size_t operator()(const std::vector<uint8_t>& array) const {
@@ -35,7 +32,7 @@
return hash;
}
};
-TEST_F(DedupeSetTest, Test) {
+TEST(DedupeSetTest, Test) {
Thread* self = Thread::Current();
typedef std::vector<uint8_t> ByteArray;
DedupeSet<ByteArray, size_t, DedupeHashFunc> deduplicator("test");
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d5d1303..d8112ea 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -200,21 +200,24 @@
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- CompilerDriver::DescriptorSet* ReadImageClassesFromZip(const std::string& zip_filename,
- const char* image_classes_filename) {
- UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename));
+ CompilerDriver::DescriptorSet* ReadImageClassesFromZip(const char* zip_filename,
+ const char* image_classes_filename,
+ std::string* error_msg) {
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
if (zip_archive.get() == NULL) {
- LOG(ERROR) << "Failed to open zip file " << zip_filename;
return NULL;
}
UniquePtr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename));
if (zip_entry.get() == NULL) {
- LOG(ERROR) << "Failed to find " << image_classes_filename << " within " << zip_filename;
+ *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
+ zip_filename, error_msg->c_str());
return NULL;
}
- UniquePtr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(image_classes_filename));
+ UniquePtr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(image_classes_filename,
+ error_msg));
if (image_classes_file.get() == NULL) {
- LOG(ERROR) << "Failed to extract " << image_classes_filename << " from " << zip_filename;
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
+ zip_filename, error_msg->c_str());
return NULL;
}
const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
@@ -368,9 +371,10 @@
if (DexFilesContains(dex_files, parsed[i])) {
continue;
}
- const DexFile* dex_file = DexFile::Open(parsed[i], parsed[i]);
+ std::string error_msg;
+ const DexFile* dex_file = DexFile::Open(parsed[i].c_str(), parsed[i].c_str(), &error_msg);
if (dex_file == NULL) {
- LOG(WARNING) << "Failed to open dex file " << parsed[i];
+ LOG(WARNING) << "Failed to open dex file '" << parsed[i] << "': " << error_msg;
} else {
dex_files.push_back(dex_file);
}
@@ -416,9 +420,10 @@
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i];
const char* dex_location = dex_locations[i];
- const DexFile* dex_file = DexFile::Open(dex_filename, dex_location);
+ std::string error_msg;
+ const DexFile* dex_file = DexFile::Open(dex_filename, dex_location, &error_msg);
if (dex_file == NULL) {
- LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "'\n";
+ LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
} else {
dex_files.push_back(dex_file);
@@ -887,14 +892,17 @@
// If --image-classes was specified, calculate the full list of classes to include in the image
UniquePtr<CompilerDriver::DescriptorSet> image_classes(NULL);
if (image_classes_filename != NULL) {
+ std::string error_msg;
if (image_classes_zip_filename != NULL) {
image_classes.reset(dex2oat->ReadImageClassesFromZip(image_classes_zip_filename,
- image_classes_filename));
+ image_classes_filename,
+ &error_msg));
} else {
image_classes.reset(dex2oat->ReadImageClassesFromFile(image_classes_filename));
}
if (image_classes.get() == NULL) {
- LOG(ERROR) << "Failed to create list of image classes from " << image_classes_filename;
+ LOG(ERROR) << "Failed to create list of image classes from '" << image_classes_filename <<
+ "': " << error_msg;
return EXIT_FAILURE;
}
}
@@ -904,14 +912,18 @@
dex_files = Runtime::Current()->GetClassLinker()->GetBootClassPath();
} else {
if (dex_filenames.empty()) {
- UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd));
+ std::string error_msg;
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(),
+ &error_msg));
if (zip_archive.get() == NULL) {
- LOG(ERROR) << "Failed to open zip from file descriptor for " << zip_location;
+ LOG(ERROR) << "Failed to open zip from file descriptor for '" << zip_location << "': "
+ << error_msg;
return EXIT_FAILURE;
}
- const DexFile* dex_file = DexFile::Open(*zip_archive.get(), zip_location);
+ const DexFile* dex_file = DexFile::Open(*zip_archive.get(), zip_location, &error_msg);
if (dex_file == NULL) {
- LOG(ERROR) << "Failed to open dex from file descriptor for zip file: " << zip_location;
+ LOG(ERROR) << "Failed to open dex from file descriptor for zip file '" << zip_location
+ << "': " << error_msg;
return EXIT_FAILURE;
}
dex_files.push_back(dex_file);
@@ -1063,7 +1075,8 @@
// Strip unneeded sections for target
off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET);
CHECK_EQ(0, seek_actual);
- ElfStripper::Strip(oat_file.get());
+ std::string error_msg;
+ CHECK(ElfStripper::Strip(oat_file.get(), &error_msg)) << error_msg;
// We wrote the oat file successfully, and want to keep it.
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 782c1f3..6239e9a 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -103,6 +103,10 @@
"tst", "rsb", "cmp", "cmn", "orr", "mul", "bic", "mvn",
};
+static const char* kThumbReverseOperations[] = {
+ "rev", "rev16", "rbit", "revsh"
+};
+
struct ArmRegister {
explicit ArmRegister(uint32_t r) : r(r) { CHECK_LE(r, 15U); }
ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { CHECK_LE(r, 15U); }
@@ -995,6 +999,31 @@
}
break;
}
+ case 0x29: { // 0101001
+ // |111|11|1000000|0000|1111|1100|00|0 0|0000|
+ // |5 3|21|0 4|3 0|5 2|1 8|76|5 4|3 0|
+ // |---|--|-------|----|----|----|--|---|----|
+ // |332|22|2222222|1111|1111|1100|00|0 0|0000|
+ // |1 9|87|6 0|9 6|5 2|1 8|76|5 4|3 0|
+ // |---|--|-------|----|----|----|--|---|----|
+ // |111|11|0101001| Rm |1111| Rd |11|op3| Rm |
+ // REV - 111 11 0101001 mmmm 1111 dddd 1000 mmmm
+ // REV16 - 111 11 0101001 mmmm 1111 dddd 1001 mmmm
+ // RBIT - 111 11 0101001 mmmm 1111 dddd 1010 mmmm
+ // REVSH - 111 11 0101001 mmmm 1111 dddd 1011 mmmm
+ if ((instr & 0xf0c0) == 0xf080) {
+ uint32_t op3 = (instr >> 4) & 3;
+ opcode << kThumbReverseOperations[op3];
+ ArmRegister Rm(instr, 0);
+ ArmRegister Rd(instr, 8);
+ args << Rd << ", " << Rm;
+ ArmRegister Rm2(instr, 16);
+ if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) {
+ args << " (UNPREDICTABLE)";
+ }
+ } // else unknown instruction
+ break;
+ }
case 0x05: case 0x0D: case 0x15: case 0x1D: { // 00xx101
// Load word
// |111|11|10|0 0|00|0|0000|1111|110000|000000|
@@ -1285,6 +1314,16 @@
DumpBranchTarget(args, instr_ptr + 4, imm32);
break;
}
+ case 0x50: case 0x51: // 101000x
+ case 0x52: case 0x53: // 101001x
+ case 0x56: case 0x57: { // 101011x
+ uint16_t op = (instr >> 6) & 3;
+ opcode << kThumbReverseOperations[op];
+ ThumbRegister Rm(instr, 3);
+ ThumbRegister Rd(instr, 0);
+ args << Rd << ", " << Rm;
+ break;
+ }
case 0x78: case 0x79: case 0x7A: case 0x7B: // 1111xxx
case 0x7C: case 0x7D: case 0x7E: case 0x7F: {
// If-Then, and hints
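Both new disassembler cases above index the same kThumbReverseOperations table, using bits 5:4 of the 32-bit Thumb2 encoding and bits 7:6 of the 16-bit encoding. A small hedged sketch of the 16-bit decode path (standalone, with hypothetical helper names):

#include <cstdint>
#include <string>

// Same mnemonic table as above; index 2 (rbit) has no 16-bit encoding.
static const char* kReverseOps[] = { "rev", "rev16", "rbit", "revsh" };

// 16-bit Thumb: opcode in bits 7:6, Rm in bits 5:3, Rd in bits 2:0.
std::string DecodeThumb16Reverse(uint16_t instr) {
  const char* op = kReverseOps[(instr >> 6) & 3];
  uint32_t rd = instr & 7;
  uint32_t rm = (instr >> 3) & 7;
  return std::string(op) + " r" + std::to_string(rd) + ", r" + std::to_string(rm);
}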
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index e5cdb7b..9ed65cd 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -520,6 +520,10 @@
case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; break;
case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
+ case 0xC8: case 0xC9: case 0xCA: case 0xCB: case 0xCC: case 0xCD: case 0xCE: case 0xCF:
+ opcode << "bswap";
+ reg_in_opcode = true;
+ break;
default:
opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 6db5813..fdeeaec 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -173,9 +173,13 @@
MethodHelper mh(m);
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
- CHECK(oat_dex_file != NULL);
- UniquePtr<const DexFile> dex_file(oat_dex_file->OpenDexFile());
- if (dex_file.get() != NULL) {
+ CHECK(oat_dex_file != nullptr);
+ std::string error_msg;
+ UniquePtr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
+ << "': " << error_msg;
+ } else {
const DexFile::ClassDef* class_def =
dex_file->FindClassDef(mh.GetDeclaringClassDescriptor());
if (class_def != NULL) {
@@ -199,8 +203,11 @@
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != NULL);
- UniquePtr<const DexFile> dex_file(oat_dex_file->OpenDexFile());
- if (dex_file.get() == NULL) {
+ std::string error_msg;
+ UniquePtr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file '" << oat_dex_file->GetDexFileLocation()
+ << "': " << error_msg;
continue;
}
offsets_.insert(reinterpret_cast<uint32_t>(&dex_file->GetHeader()));
@@ -245,9 +252,10 @@
os << "OAT DEX FILE:\n";
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
- UniquePtr<const DexFile> dex_file(oat_dex_file.OpenDexFile());
+ std::string error_msg;
+ UniquePtr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
if (dex_file.get() == NULL) {
- os << "NOT FOUND\n\n";
+ os << "NOT FOUND: " << error_msg << "\n\n";
return;
}
for (size_t class_def_index = 0; class_def_index < dex_file->NumClassDefs(); class_def_index++) {
@@ -255,8 +263,10 @@
const char* descriptor = dex_file->GetClassDescriptor(class_def);
UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file.GetOatClass(class_def_index));
CHECK(oat_class.get() != NULL);
- os << StringPrintf("%zd: %s (type_idx=%d) (", class_def_index, descriptor, class_def.class_idx_)
- << oat_class->GetStatus() << ")\n";
+ os << StringPrintf("%zd: %s (type_idx=%d)", class_def_index, descriptor, class_def.class_idx_)
+ << " (" << oat_class->GetStatus() << ")"
+ << " (" << oat_class->GetType() << ")\n";
+ // TODO: include bitmap here if type is kOatClassBitmap?
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
DumpOatClass(indented_os, *oat_class.get(), *(dex_file.get()), class_def);
@@ -727,9 +737,10 @@
os << " (" << oat_location << ")";
}
os << "\n";
- const OatFile* oat_file = class_linker->FindOatFileFromOatLocation(oat_location);
+ std::string error_msg;
+ const OatFile* oat_file = class_linker->FindOatFileFromOatLocation(oat_location, &error_msg);
if (oat_file == NULL) {
- os << "NOT FOUND\n";
+ os << "NOT FOUND: " << error_msg << "\n";
return;
}
os << "\n";
@@ -775,7 +786,7 @@
os << "STATS:\n" << std::flush;
UniquePtr<File> file(OS::OpenFileForReading(image_filename_.c_str()));
if (file.get() == NULL) {
- std::string cache_location(GetDalvikCacheFilenameOrDie(image_filename_));
+ std::string cache_location(GetDalvikCacheFilenameOrDie(image_filename_.c_str()));
file.reset(OS::OpenFileForReading(cache_location.c_str()));
if (file.get() == NULL) {
LOG(WARNING) << "Failed to find image in " << image_filename_
@@ -1412,10 +1423,11 @@
}
if (oat_filename != NULL) {
+ std::string error_msg;
OatFile* oat_file =
- OatFile::Open(oat_filename, oat_filename, NULL, false);
+ OatFile::Open(oat_filename, oat_filename, NULL, false, &error_msg);
if (oat_file == NULL) {
- fprintf(stderr, "Failed to open oat file from %s\n", oat_filename);
+ fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
}
OatDumper oat_dumper(*host_prefix.get(), *oat_file);
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d8abbf1..e4b7e47 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -21,6 +21,8 @@
LIBART_COMMON_SRC_FILES := \
atomic.cc.arm \
barrier.cc \
+ base/allocator.cc \
+ base/bit_vector.cc \
base/logging.cc \
base/mutex.cc \
base/stringpiece.cc \
@@ -177,6 +179,7 @@
runtime_android.cc \
thread_android.cc
+LIBART_LDFLAGS :=
ifeq ($(TARGET_ARCH),arm)
LIBART_TARGET_SRC_FILES += \
arch/arm/context_arm.cc.arm \
@@ -194,6 +197,7 @@
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
arch/x86/thread_x86.cc
+LIBART_LDFLAGS += -Wl,--no-fatal-warnings
else # TARGET_ARCH != x86
ifeq ($(TARGET_ARCH),mips)
LIBART_TARGET_SRC_FILES += \
@@ -245,6 +249,7 @@
locks.h \
lock_word.h \
mirror/class.h \
+ oat.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
@@ -304,6 +309,7 @@
LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
LOCAL_CFLAGS := $(LIBART_CFLAGS)
+ LOCAL_LDFLAGS := $(LIBART_LDFLAGS)
ifeq ($$(art_target_or_host),target)
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c98b764..736ce2f 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -48,7 +48,7 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
@@ -81,7 +81,7 @@
/*
* Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
*/
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
@@ -1030,12 +1030,13 @@
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
+ add sp, #16 @ skip r1-r3, 4 bytes padding.
+ .cfi_adjust_cfa_offset -16
cbnz r2, 1f @ success if no exception is pending
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
bx lr @ return on success
1:
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
@@ -1048,11 +1049,7 @@
cbz r0, 1f @ is code pointer null? goto exception
mov r12, r0
ldr r0, [sp, #0] @ load resolved method in r0
- ldr r1, [sp, #8] @ restore non-callee save r1
- ldrd r2, [sp, #12] @ restore non-callee saves r2-r3
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ rewind sp
- .cfi_adjust_cfa_offset -48
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
bx r12 @ tail-call into actual code
1:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1066,12 +1063,13 @@
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
+ add sp, #16 @ skip r1-r3, 4 bytes padding.
+ .cfi_adjust_cfa_offset -16
cbnz r2, 1f @ success if no exception is pending
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
bx lr @ return on success
1:
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 897aaf4..031d13a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1051,11 +1051,9 @@
jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
move $a3, $sp # pass $sp
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- lw $gp, 52($sp) # restore $gp
- lw $ra, 60($sp) # restore $ra
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
bnez $t0, 1f
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
+ nop
jr $ra
nop
1:
@@ -1069,18 +1067,14 @@
move $a2, rSELF # pass Thread::Current
jal artQuickResolutionTrampoline # (Method* called, receiver, Thread*, SP)
move $a3, $sp # pass $sp
- lw $gp, 52($sp) # restore $gp
- lw $ra, 60($sp) # restore $ra
beqz $v0, 1f
lw $a0, 0($sp) # load resolved method to $a0
- lw $a1, 4($sp) # restore non-callee save $a1
- lw $a2, 8($sp) # restore non-callee save $a2
- lw $a3, 12($sp) # restore non-callee save $a3
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
move $t9, $v0 # code pointer must be in $t9 to generate the global pointer
jr $v0 # tail call to method
+ nop
1:
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
@@ -1092,11 +1086,9 @@
jal artQuickToInterpreterBridge # (Method* method, Thread*, SP)
move $a2, $sp # pass $sp
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
- lw $gp, 52($sp) # restore $gp
- lw $ra, 60($sp) # restore $ra
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
bnez $t0, 1f
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
+ nop
jr $ra
nop
1:
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index d7e1be8..805f6f4 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1034,8 +1034,9 @@
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
- addl LITERAL(44), %esp // pop arguments
- .cfi_adjust_cfa_offset -44
+ addl LITERAL(16), %esp // pop arguments
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_to_interpreter_bridge
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
new file mode 100644
index 0000000..4f7753d
--- /dev/null
+++ b/runtime/base/allocator.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "allocator.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "base/logging.h"
+
+namespace art {
+
+class MallocAllocator : public Allocator {
+ public:
+ explicit MallocAllocator() {}
+ ~MallocAllocator() {}
+
+ virtual void* Alloc(size_t size) {
+ return calloc(sizeof(uint8_t), size);
+ }
+
+ virtual void Free(void* p) {
+ free(p);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MallocAllocator);
+};
+
+MallocAllocator g_malloc_allocator;
+
+class NoopAllocator : public Allocator {
+ public:
+ explicit NoopAllocator() {}
+ ~NoopAllocator() {}
+
+ virtual void* Alloc(size_t size) {
+ LOG(FATAL) << "NoopAllocator::Alloc should not be called";
+ return NULL;
+ }
+
+ virtual void Free(void* p) {
+ // Noop.
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(NoopAllocator);
+};
+
+NoopAllocator g_noop_allocator;
+
+Allocator* Allocator::GetMallocAllocator() {
+ return &g_malloc_allocator;
+}
+
+Allocator* Allocator::GetNoopAllocator() {
+ return &g_noop_allocator;
+}
+
+
+} // namespace art
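
For reference, a minimal usage sketch of the new Allocator interface (illustrative only; it assumes
compilation inside the ART tree so that base/allocator.h is on the include path):

  #include "base/allocator.h"

  namespace art {

  // Illustrative only: exercise the malloc-backed allocator.
  void AllocatorSketch() {
    Allocator* allocator = Allocator::GetMallocAllocator();
    void* buffer = allocator->Alloc(64);  // calloc-backed, so the memory is zero-filled.
    // ... use buffer ...
    allocator->Free(buffer);
    // Allocator::GetNoopAllocator() is for callers that provide their own storage:
    // its Alloc() aborts with LOG(FATAL) and its Free() does nothing.
  }

  }  // namespace art
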
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
new file mode 100644
index 0000000..917bf0b
--- /dev/null
+++ b/runtime/base/allocator.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ALLOCATOR_H_
+#define ART_RUNTIME_BASE_ALLOCATOR_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+class Allocator {
+ public:
+ static Allocator* GetMallocAllocator();
+ static Allocator* GetNoopAllocator();
+
+ Allocator() {}
+ virtual ~Allocator() {}
+
+ virtual void* Alloc(size_t) = 0;
+ virtual void Free(void*) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Allocator);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_ALLOCATOR_H_
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
new file mode 100644
index 0000000..3b82651
--- /dev/null
+++ b/runtime/base/bit_vector.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_vector.h"
+
+namespace art {
+
+// TODO: profile to make sure this is still a win relative to just using shifted masks.
+static uint32_t check_masks[32] = {
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+ 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+ 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+ 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+ 0x40000000, 0x80000000 };
+
+static inline uint32_t BitsToWords(uint32_t bits) {
+ return (bits + 31) >> 5;
+}
+
+// TODO: replace excessive argument defaulting when we are at gcc 4.7
+// or later on host with delegating constructor support. Specifically,
+// start_bits and storage_size/storage are mutually exclusive.
+BitVector::BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size,
+ uint32_t* storage)
+ : allocator_(allocator),
+ expandable_(expandable),
+ storage_size_(storage_size),
+ storage_(storage) {
+ DCHECK_EQ(sizeof(storage_[0]), 4U); // Assuming 32-bit units.
+ if (storage_ == NULL) {
+ storage_size_ = BitsToWords(start_bits);
+ storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(uint32_t)));
+ }
+}
+
+BitVector::~BitVector() {
+ allocator_->Free(storage_);
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool BitVector::IsBitSet(uint32_t num) const {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+
+ uint32_t val = storage_[num >> 5] & check_masks[num & 0x1f];
+ return (val != 0);
+}
+
+// Mark all bits as "clear".
+void BitVector::ClearAllBits() {
+ memset(storage_, 0, storage_size_ * sizeof(uint32_t));
+}
+
+// Mark the specified bit as "set".
+/*
+ * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
+ * not using it badly, or change the resize mechanism.
+ */
+void BitVector::SetBit(uint32_t num) {
+ if (num >= storage_size_ * sizeof(uint32_t) * 8) {
+ DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
+
+ /* Round up to word boundaries for "num+1" bits */
+ uint32_t new_size = BitsToWords(num + 1);
+ DCHECK_GT(new_size, storage_size_);
+ uint32_t *new_storage =
+ static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(uint32_t)));
+ memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
+ // Zero out the new storage words.
+ memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
+ // TODO: collect stats on space wasted because of resize.
+ storage_ = new_storage;
+ storage_size_ = new_size;
+ }
+
+ storage_[num >> 5] |= check_masks[num & 0x1f];
+}
+
+// Mark the specified bit as "unset".
+void BitVector::ClearBit(uint32_t num) {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+ storage_[num >> 5] &= ~check_masks[num & 0x1f];
+}
+
+// Intersect with another bit vector. Sizes and expandability must be the same.
+void BitVector::Intersect(const BitVector* src) {
+ DCHECK_EQ(storage_size_, src->GetStorageSize());
+ DCHECK_EQ(expandable_, src->IsExpandable());
+ for (uint32_t idx = 0; idx < storage_size_; idx++) {
+ storage_[idx] &= src->GetRawStorageWord(idx);
+ }
+}
+
+/*
+ * Union with another bit vector. Sizes and expandability must be the same.
+ */
+void BitVector::Union(const BitVector* src) {
+ DCHECK_EQ(storage_size_, src->GetStorageSize());
+ DCHECK_EQ(expandable_, src->IsExpandable());
+ for (uint32_t idx = 0; idx < storage_size_; idx++) {
+ storage_[idx] |= src->GetRawStorageWord(idx);
+ }
+}
+
+// Count the number of bits that are set.
+uint32_t BitVector::NumSetBits() const {
+ uint32_t count = 0;
+ for (uint32_t word = 0; word < storage_size_; word++) {
+ count += __builtin_popcount(storage_[word]);
+ }
+ return count;
+}
+
+// Count the number of bits that are set up through and including num.
+uint32_t BitVector::NumSetBits(uint32_t num) const {
+ DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+ uint32_t last_word = num >> 5;
+ uint32_t partial_word_bits = num & 0x1f;
+
+ // partial_word_bits | # | 0xffffffff >> (31 - #) | (1 << (# + 1)) - 1 | partial_word_mask
+ // 00000 | 0 | 0xffffffff >> (31 - 0) | (1 << (0 + 1)) - 1 | 0x00000001
+ // 00001 | 1 | 0xffffffff >> (31 - 1) | (1 << (1 + 1)) - 1 | 0x00000003
+ // 00010 | 2 | 0xffffffff >> (31 - 2) | (1 << (2 + 1)) - 1 | 0x00000007
+ // ..... |
+ // 11110 | 30 | 0xffffffff >> (31 - 30) | (1 << (30 + 1)) - 1 | 0x7fffffff
+ // 11111 | 31 | 0xffffffff >> (31 - 31) | last_full_word++ | 0xffffffff
+ uint32_t partial_word_mask = 0xffffffff >> (0x1f - partial_word_bits);
+
+ uint32_t count = 0;
+ for (uint32_t word = 0; word < last_word; word++) {
+ count += __builtin_popcount(storage_[word]);
+ }
+ count += __builtin_popcount(storage_[last_word] & partial_word_mask);
+ return count;
+}
+
+BitVector::Iterator* BitVector::GetIterator() const {
+ return new (allocator_) Iterator(this);
+}
+
+/*
+ * Mark the specified number of bits as "set". Cannot set all bits like ClearAllBits
+ * since there might be unused bits - setting those to one will confuse the
+ * iterator.
+ */
+void BitVector::SetInitialBits(uint32_t num_bits) {
+ DCHECK_LE(BitsToWords(num_bits), storage_size_);
+ uint32_t idx;
+ for (idx = 0; idx < (num_bits >> 5); idx++) {
+ storage_[idx] = -1;
+ }
+ uint32_t rem_num_bits = num_bits & 0x1f;
+ if (rem_num_bits) {
+ storage_[idx] = (1 << rem_num_bits) - 1;
+ }
+}
+
+} // namespace art
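
A minimal sketch of typical BitVector use, again assuming the ART include paths (illustrative only;
it exercises only calls defined above):

  #include "base/bit_vector.h"

  namespace art {

  // Illustrative only: a heap-backed, non-expandable vector of 64 bits (2 storage words).
  void BitVectorSketch() {
    BitVector bv(64, false /* expandable */, Allocator::GetMallocAllocator());
    bv.SetBit(3);
    bv.SetBit(40);
    CHECK(bv.IsBitSet(3));
    CHECK_EQ(2U, bv.NumSetBits());   // All set bits.
    CHECK_EQ(1U, bv.NumSetBits(3));  // Set bits among positions 0..3 inclusive.
    bv.ClearBit(3);
    CHECK_EQ(1U, bv.NumSetBits());
  }

  }  // namespace art
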
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
new file mode 100644
index 0000000..74bec08
--- /dev/null
+++ b/runtime/base/bit_vector.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include "allocator.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+
+/*
+ * Expanding bitmap, used for tracking resources. Bits are numbered starting
+ * from zero. All operations on a BitVector are unsynchronized.
+ */
+class BitVector {
+ public:
+ class Iterator {
+ public:
+ explicit Iterator(const BitVector* bit_vector)
+ : p_bits_(bit_vector),
+ bit_storage_(bit_vector->GetRawStorage()),
+ bit_index_(0),
+ bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
+
+ // Return the position of the next set bit. -1 means end-of-element reached.
+ int32_t Next() {
+ // Did anything obviously change since we started?
+ DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+ DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+ if (UNLIKELY(bit_index_ >= bit_size_)) return -1;
+
+ uint32_t word_index = bit_index_ / 32;
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word >>= bit_index_ & 0x1f;
+ if (word == 0) {
+ bit_index_ &= ~0x1f;
+ do {
+ word_index++;
+ if (UNLIKELY((word_index * 32) >= bit_size_)) {
+ bit_index_ = bit_size_;
+ return -1;
+ }
+ word = bit_storage_[word_index];
+ bit_index_ += 32;
+ } while (word == 0);
+ }
+ bit_index_ += CTZ(word) + 1;
+ return bit_index_ - 1;
+ }
+
+ static void* operator new(size_t size, Allocator* allocator) {
+ return allocator->Alloc(sizeof(BitVector::Iterator));
+ };
+ static void operator delete(void* p) {
+ Iterator* it = reinterpret_cast<Iterator*>(p);
+ it->p_bits_->allocator_->Free(p);
+ }
+
+ private:
+ const BitVector* const p_bits_;
+ const uint32_t* const bit_storage_;
+ uint32_t bit_index_; // Current index (size in bits).
+ const uint32_t bit_size_; // Size of vector in bits.
+
+ friend class BitVector;
+ };
+
+ BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size = 0,
+ uint32_t* storage = NULL);
+
+ virtual ~BitVector();
+
+ void SetBit(uint32_t num);
+ void ClearBit(uint32_t num);
+ bool IsBitSet(uint32_t num) const;
+ void ClearAllBits();
+ void SetInitialBits(uint32_t num_bits);
+ void Copy(BitVector* src) {
+ memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+ }
+ void Intersect(const BitVector* src2);
+ void Union(const BitVector* src);
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const BitVector* src) {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+ }
+ uint32_t NumSetBits() const;
+ uint32_t NumSetBits(uint32_t num) const;
+
+ Iterator* GetIterator() const;
+
+ uint32_t GetStorageSize() const { return storage_size_; }
+ bool IsExpandable() const { return expandable_; }
+ uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
+ uint32_t* GetRawStorage() { return storage_; }
+ const uint32_t* GetRawStorage() const { return storage_; }
+ size_t GetSizeOf() const { return storage_size_ * sizeof(uint32_t); }
+
+ private:
+ Allocator* const allocator_;
+ const bool expandable_; // expand bitmap if we run out?
+ uint32_t storage_size_; // current size, in 32-bit words.
+ uint32_t* storage_;
+};
+
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_VECTOR_H_
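
A sketch of the two ways to obtain an Iterator over the set bits: stack construction, or
GetIterator(), which allocates the iterator from the vector's own allocator. UniquePtr is ART's
UniquePtr.h, as used by the test below. Illustrative only:

  #include "UniquePtr.h"
  #include "base/bit_vector.h"

  namespace art {

  // Illustrative only: Next() returns set-bit indices in ascending order, then -1.
  void IterateSketch(const BitVector& bv) {
    BitVector::Iterator it(&bv);  // Stack-allocated.
    for (int32_t bit = it.Next(); bit != -1; bit = it.Next()) {
      // ... visit 'bit' ...
    }
    // Heap-allocated: deletion goes through the class's operator delete, which
    // returns the memory to the vector's allocator.
    UniquePtr<BitVector::Iterator> heap_it(bv.GetIterator());
    while (heap_it->Next() != -1) {
    }
  }

  }  // namespace art
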
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
new file mode 100644
index 0000000..d99d059
--- /dev/null
+++ b/runtime/base/bit_vector_test.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "UniquePtr.h"
+#include "bit_vector.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(BitVector, Test) {
+ const size_t kBits = 32;
+
+ BitVector bv(kBits, false, Allocator::GetMallocAllocator());
+ EXPECT_EQ(1U, bv.GetStorageSize());
+ EXPECT_EQ(kWordSize, bv.GetSizeOf());
+ EXPECT_FALSE(bv.IsExpandable());
+
+ EXPECT_EQ(0U, bv.NumSetBits());
+ EXPECT_EQ(0U, bv.NumSetBits(0));
+ EXPECT_EQ(0U, bv.NumSetBits(kBits - 1));
+ for (size_t i = 0; i < kBits; i++) {
+ EXPECT_FALSE(bv.IsBitSet(i));
+ }
+ EXPECT_EQ(0U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0U, *bv.GetRawStorage());
+
+ BitVector::Iterator empty_iterator(&bv);
+ EXPECT_EQ(-1, empty_iterator.Next());
+
+ UniquePtr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator());
+ EXPECT_EQ(-1, empty_iterator_on_heap->Next());
+
+ bv.SetBit(0);
+ bv.SetBit(kBits - 1);
+ EXPECT_EQ(2U, bv.NumSetBits());
+ EXPECT_EQ(1U, bv.NumSetBits(0));
+ EXPECT_EQ(2U, bv.NumSetBits(kBits - 1));
+ EXPECT_TRUE(bv.IsBitSet(0));
+ for (size_t i = 1; i < kBits - 1; i++) {
+ EXPECT_FALSE(bv.IsBitSet(i));
+ }
+ EXPECT_TRUE(bv.IsBitSet(kBits - 1));
+ EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x80000001U, *bv.GetRawStorage());
+
+ BitVector::Iterator iterator(&bv);
+ EXPECT_EQ(0, iterator.Next());
+ EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next());
+ EXPECT_EQ(-1, iterator.Next());
+}
+
+TEST(BitVector, NoopAllocator) {
+ const uint32_t kWords = 2;
+
+ uint32_t bits[kWords];
+ memset(bits, 0, sizeof(bits));
+
+ BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits);
+ EXPECT_EQ(kWords, bv.GetStorageSize());
+ EXPECT_EQ(kWords * kWordSize, bv.GetSizeOf());
+ EXPECT_EQ(bits, bv.GetRawStorage());
+ EXPECT_EQ(0U, bv.NumSetBits());
+
+ bv.SetBit(8);
+ EXPECT_EQ(1U, bv.NumSetBits());
+ EXPECT_EQ(0x00000100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(1U, bv.NumSetBits());
+
+ bv.SetBit(16);
+ EXPECT_EQ(2U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000000U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(2U, bv.NumSetBits());
+
+ bv.SetBit(32);
+ EXPECT_EQ(3U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00000001U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(3U, bv.NumSetBits());
+
+ bv.SetBit(48);
+ EXPECT_EQ(4U, bv.NumSetBits());
+ EXPECT_EQ(0x00010100U, bv.GetRawStorageWord(0));
+ EXPECT_EQ(0x00010001U, bv.GetRawStorageWord(1));
+ EXPECT_EQ(4U, bv.NumSetBits());
+
+ EXPECT_EQ(0U, bv.NumSetBits(0));
+
+ EXPECT_EQ(0U, bv.NumSetBits(7));
+ EXPECT_EQ(1U, bv.NumSetBits(8));
+ EXPECT_EQ(1U, bv.NumSetBits(9));
+
+ EXPECT_EQ(1U, bv.NumSetBits(15));
+ EXPECT_EQ(2U, bv.NumSetBits(16));
+ EXPECT_EQ(2U, bv.NumSetBits(17));
+
+ EXPECT_EQ(2U, bv.NumSetBits(31));
+ EXPECT_EQ(3U, bv.NumSetBits(32));
+ EXPECT_EQ(3U, bv.NumSetBits(33));
+
+ EXPECT_EQ(3U, bv.NumSetBits(47));
+ EXPECT_EQ(4U, bv.NumSetBits(48));
+ EXPECT_EQ(4U, bv.NumSetBits(49));
+
+ EXPECT_EQ(4U, bv.NumSetBits(63));
+}
+
+} // namespace art
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 6531858..d00c64a 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -138,8 +138,10 @@
#if defined (__APPLE__)
#define HOT_ATTR
+#define COLD_ATTR
#else
#define HOT_ATTR __attribute__ ((hot))
+#define COLD_ATTR __attribute__ ((cold))
#endif
#define PURE __attribute__ ((__pure__))
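
COLD_ATTR mirrors the existing HOT_ATTR pattern: it expands to __attribute__((cold)) everywhere
except on Apple hosts, where it is empty. A sketch of applying it to a rarely-taken error path (the
function name is hypothetical):

  #include "base/macros.h"

  // Hypothetical declaration: the cold attribute hints that this path is rarely
  // executed, so the compiler can optimize it for size and keep it out of hot text.
  void ReportRareFailure(const char* what) COLD_ATTR;
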
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 36f8ba7..f48c76d 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -102,10 +102,6 @@
return fd_ >= 0;
}
-std::string FdFile::GetPath() const {
- return file_path_;
-}
-
bool FdFile::ReadFully(void* buffer, int64_t byte_count) {
char* ptr = static_cast<char*>(buffer);
while (byte_count > 0) {
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 79a0db9..19e3511 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -57,7 +57,9 @@
// Bonus API.
int Fd() const;
bool IsOpened() const;
- std::string GetPath() const;
+ const std::string& GetPath() const {
+ return file_path_;
+ }
void DisableAutoClose();
bool ReadFully(void* buffer, int64_t byte_count);
bool WriteFully(const void* buffer, int64_t byte_count);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index aa5f2bf..03f2c9d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -283,7 +283,7 @@
SirtRef<mirror::Class>
java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::DexCacheClass)));
SetClassRoot(kJavaLangDexCache, java_lang_DexCache.get());
- java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCacheClass));
+ java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCache));
java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved, self);
// Constructor, Field, Method, and AbstractMethod are necessary so that FindClass can link members.
@@ -540,9 +540,10 @@
}
}
-bool ClassLinker::GenerateOatFile(const std::string& dex_filename,
+bool ClassLinker::GenerateOatFile(const char* dex_filename,
int oat_fd,
- const std::string& oat_cache_filename) {
+ const char* oat_cache_filename) {
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
std::string dex2oat_string(GetAndroidRoot());
dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
const char* dex2oat = dex2oat_string.c_str();
@@ -567,7 +568,8 @@
const char* oat_location_option = oat_location_option_string.c_str();
std::string oat_compiler_filter_string("-compiler-filter:");
- switch (Runtime::Current()->GetCompilerFilter()) {
+ Runtime::CompilerFilter filter = Runtime::Current()->GetCompilerFilter();
+ switch (filter) {
case Runtime::kInterpretOnly:
oat_compiler_filter_string += "interpret-only";
break;
@@ -584,7 +586,7 @@
oat_compiler_filter_string += "everything";
break;
default:
- LOG(FATAL) << "Unexpected case.";
+ LOG(FATAL) << "Unexpected case: " << filter;
}
const char* oat_compiler_filter_option = oat_compiler_filter_string.c_str();
@@ -633,49 +635,55 @@
int status;
pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
if (got_pid != pid) {
- PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowIOException("Failed to create oat file. Waitpid failed: wanted %d, got %d", pid,
+ got_pid);
return false;
}
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- LOG(ERROR) << dex2oat << " failed with dex-file=" << dex_filename;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowIOException("Failed to create oat file. %s failed with dex-file '%s'", dex2oat,
+ dex_filename);
return false;
}
}
return true;
}
-void ClassLinker::RegisterOatFile(const OatFile& oat_file) {
+const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
WriterMutexLock mu(Thread::Current(), dex_lock_);
- RegisterOatFileLocked(oat_file);
-}
-
-void ClassLinker::RegisterOatFileLocked(const OatFile& oat_file) {
- dex_lock_.AssertExclusiveHeld(Thread::Current());
- if (kIsDebugBuild) {
- for (size_t i = 0; i < oat_files_.size(); ++i) {
- CHECK_NE(&oat_file, oat_files_[i]) << oat_file.GetLocation();
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ if (UNLIKELY(oat_file->GetLocation() == oat_files_[i]->GetLocation())) {
+ VLOG(class_linker) << "Attempt to register oat file that's already registered: "
+ << oat_file->GetLocation();
+ for (size_t j = i; j < oat_files_.size(); ++j) {
+ CHECK_NE(oat_file, oat_files_[j]) << "Attempt to re-register oat file.";
+ }
+ delete oat_file;
+ return oat_files_[i];
}
}
- VLOG(class_linker) << "Registering " << oat_file.GetLocation();
- oat_files_.push_back(&oat_file);
+ VLOG(class_linker) << "Registering " << oat_file->GetLocation();
+ oat_files_.push_back(oat_file);
+ return oat_file;
}
OatFile& ClassLinker::GetImageOatFile(gc::space::ImageSpace* space) {
VLOG(startup) << "ClassLinker::GetImageOatFile entering";
- OatFile& oat_file = space->ReleaseOatFile();
- WriterMutexLock mu(Thread::Current(), dex_lock_);
- RegisterOatFileLocked(oat_file);
+ OatFile* oat_file = space->ReleaseOatFile();
+ CHECK_EQ(RegisterOatFile(oat_file), oat_file);
VLOG(startup) << "ClassLinker::GetImageOatFile exiting";
- return oat_file;
+ return *oat_file;
}
const OatFile* ClassLinker::FindOpenedOatFileForDexFile(const DexFile& dex_file) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- return FindOpenedOatFileFromDexLocation(dex_file.GetLocation(), dex_file.GetLocationChecksum());
+ return FindOpenedOatFileFromDexLocation(dex_file.GetLocation().c_str(),
+ dex_file.GetLocationChecksum());
}
-const OatFile* ClassLinker::FindOpenedOatFileFromDexLocation(const std::string& dex_location,
+const OatFile* ClassLinker::FindOpenedOatFileFromDexLocation(const char* dex_location,
uint32_t dex_location_checksum) {
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i < oat_files_.size(); i++) {
const OatFile* oat_file = oat_files_[i];
DCHECK(oat_file != NULL);
@@ -689,82 +697,83 @@
return NULL;
}
-const DexFile* ClassLinker::FindDexFileInOatLocation(const std::string& dex_location,
+const DexFile* ClassLinker::FindDexFileInOatLocation(const char* dex_location,
uint32_t dex_location_checksum,
- const std::string& oat_location) {
+ const char* oat_location,
+ std::string* error_msg) {
UniquePtr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, NULL,
- !Runtime::Current()->IsCompiler()));
- if (oat_file.get() == NULL) {
- VLOG(class_linker) << "Failed to find existing oat file at " << oat_location;
- return NULL;
+ !Runtime::Current()->IsCompiler(),
+ error_msg));
+ if (oat_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
+ error_msg->c_str());
+ return nullptr;
}
Runtime* runtime = Runtime::Current();
const ImageHeader& image_header = runtime->GetHeap()->GetImageSpace()->GetImageHeader();
uint32_t expected_image_oat_checksum = image_header.GetOatChecksum();
uint32_t actual_image_oat_checksum = oat_file->GetOatHeader().GetImageFileLocationOatChecksum();
if (expected_image_oat_checksum != actual_image_oat_checksum) {
- VLOG(class_linker) << "Failed to find oat file at " << oat_location
- << " with expected image oat checksum of " << expected_image_oat_checksum
- << ", found " << actual_image_oat_checksum;
- return NULL;
+ *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat checksum of "
+ "0x%x, found 0x%x", oat_location, expected_image_oat_checksum,
+ actual_image_oat_checksum);
+ return nullptr;
}
uint32_t expected_image_oat_offset = reinterpret_cast<uint32_t>(image_header.GetOatDataBegin());
uint32_t actual_image_oat_offset = oat_file->GetOatHeader().GetImageFileLocationOatDataBegin();
if (expected_image_oat_offset != actual_image_oat_offset) {
- VLOG(class_linker) << "Failed to find oat file at " << oat_location
- << " with expected image oat offset " << expected_image_oat_offset
- << ", found " << actual_image_oat_offset;
- return NULL;
+ *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat offset %ud, "
+ "found %ud", oat_location, expected_image_oat_offset,
+ actual_image_oat_offset);
+ return nullptr;
}
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, &dex_location_checksum);
- if (oat_dex_file == NULL) {
- VLOG(class_linker) << "Failed to find oat file at " << oat_location << " containing " << dex_location;
- return NULL;
+ // TODO: this registers the oat file now as we may use the oat_dex_file later and we want the
+ // intern behavior of RegisterOatFile. However, if we take an early return we could remove
+ // the oat file.
+ const OatFile* opened_oat_file = RegisterOatFile(oat_file.release());
+ const OatFile::OatDexFile* oat_dex_file = opened_oat_file->GetOatDexFile(dex_location,
+ &dex_location_checksum);
+ if (oat_dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to find oat file at '%s' containing '%s'", oat_location,
+ dex_location);
+ return nullptr;
}
uint32_t expected_dex_checksum = dex_location_checksum;
uint32_t actual_dex_checksum = oat_dex_file->GetDexFileLocationChecksum();
if (expected_dex_checksum != actual_dex_checksum) {
- VLOG(class_linker) << "Failed to find oat file at " << oat_location
- << " with expected dex checksum of " << expected_dex_checksum
- << ", found " << actual_dex_checksum;
- return NULL;
+ *error_msg = StringPrintf("Failed to find oat file at '%s' with expected dex checksum of 0x%x, "
+ "found 0x%x", oat_location, expected_dex_checksum,
+ actual_dex_checksum);
+ return nullptr;
}
- RegisterOatFileLocked(*oat_file.release());
- return oat_dex_file->OpenDexFile();
-}
-
-const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const std::string& dex_location,
- uint32_t dex_location_checksum,
- const std::string& oat_location) {
- WriterMutexLock mu(Thread::Current(), dex_lock_);
- return FindOrCreateOatFileForDexLocationLocked(dex_location, dex_location_checksum, oat_location);
+ return oat_dex_file->OpenDexFile(error_msg);
}
class ScopedFlock {
public:
ScopedFlock() {}
- bool Init(const std::string& filename) {
+ bool Init(const char* filename, std::string* error_msg) {
while (true) {
- file_.reset(OS::OpenFileWithFlags(filename.c_str(), O_CREAT | O_RDWR));
+ file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
if (file_.get() == NULL) {
- LOG(ERROR) << "Failed to open file: " << filename;
+ *error_msg = StringPrintf("Failed to open file '%s'", filename);
return false;
}
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX));
if (flock_result != 0) {
- PLOG(ERROR) << "Failed to lock file: " << filename;
+ *error_msg = StringPrintf("Failed to lock file '%s': %s", filename, strerror(errno));
return false;
}
struct stat fstat_stat;
int fstat_result = TEMP_FAILURE_RETRY(fstat(file_->Fd(), &fstat_stat));
if (fstat_result != 0) {
- PLOG(ERROR) << "Failed to fstat: " << filename;
+ *error_msg = StringPrintf("Failed to fstat file '%s': %s", filename, strerror(errno));
return false;
}
struct stat stat_stat;
- int stat_result = TEMP_FAILURE_RETRY(stat(filename.c_str(), &stat_stat));
+ int stat_result = TEMP_FAILURE_RETRY(stat(filename, &stat_stat));
if (stat_result != 0) {
PLOG(WARNING) << "Failed to stat, will retry: " << filename;
// ENOENT can happen if someone racing with us unlinks the file we created so just retry.
@@ -795,49 +804,54 @@
DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
};
-const DexFile* ClassLinker::FindOrCreateOatFileForDexLocationLocked(const std::string& dex_location,
- uint32_t dex_location_checksum,
- const std::string& oat_location) {
+const DexFile* ClassLinker::FindOrCreateOatFileForDexLocation(const char* dex_location,
+ uint32_t dex_location_checksum,
+ const char* oat_location,
+ std::string* error_msg) {
// We play a locking game here so that if two different processes
// race to generate (or worse, one tries to open a partial generated
// file) we will be okay. This is actually common with apps that use
// DexClassLoader to work around the dex method reference limit and
// that have a background service running in a separate process.
ScopedFlock scoped_flock;
- if (!scoped_flock.Init(oat_location)) {
- LOG(ERROR) << "Failed to open locked oat file: " << oat_location;
- return NULL;
+ if (!scoped_flock.Init(oat_location, error_msg)) {
+ return nullptr;
}
// Check if we already have an up-to-date output file
- const DexFile* dex_file = FindDexFileInOatLocation(dex_location,
- dex_location_checksum,
- oat_location);
- if (dex_file != NULL) {
+ const DexFile* dex_file = FindDexFileInOatLocation(dex_location, dex_location_checksum,
+ oat_location, error_msg);
+ if (dex_file != nullptr) {
return dex_file;
}
+ VLOG(class_linker) << "Failed to find dex file '" << dex_location << "' in oat location '"
+ << oat_location << "': " << *error_msg;
+ error_msg->clear();
// Generate the output oat file for the dex file
VLOG(class_linker) << "Generating oat file " << oat_location << " for " << dex_location;
if (!GenerateOatFile(dex_location, scoped_flock.GetFile().Fd(), oat_location)) {
- LOG(ERROR) << "Failed to generate oat file: " << oat_location;
- return NULL;
+ CHECK(Thread::Current()->IsExceptionPending());
+ return nullptr;
}
const OatFile* oat_file = OatFile::Open(oat_location, oat_location, NULL,
- !Runtime::Current()->IsCompiler());
- if (oat_file == NULL) {
- LOG(ERROR) << "Failed to open generated oat file: " << oat_location;
- return NULL;
+ !Runtime::Current()->IsCompiler(),
+ error_msg);
+ if (oat_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open generated oat file '%s': %s",
+ oat_location, error_msg->c_str());
+ return nullptr;
}
- RegisterOatFileLocked(*oat_file);
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, &dex_location_checksum);
- if (oat_dex_file == NULL) {
- LOG(ERROR) << "Failed to find dex file " << dex_location
- << " (checksum " << dex_location_checksum
- << ") in generated oat file: " << oat_location;
- return NULL;
+ oat_file = RegisterOatFile(oat_file);
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
+ &dex_location_checksum);
+ if (oat_dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to find dex file '%s' (checksum 0x%x) in generated out file "
+ "'%s'", dex_location, dex_location_checksum, oat_location);
+ return nullptr;
}
- const DexFile* result = oat_dex_file->OpenDexFile();
+ const DexFile* result = oat_dex_file->OpenDexFile(error_msg);
+ CHECK(result != nullptr) << *error_msg;
CHECK_EQ(dex_location_checksum, result->GetLocationChecksum())
<< "dex_location=" << dex_location << " oat_location=" << oat_location << std::hex
<< " dex_location_checksum=" << dex_location_checksum
@@ -846,8 +860,9 @@
}
bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
- const std::string& dex_location,
- uint32_t dex_location_checksum) {
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ std::string* error_msg) {
Runtime* runtime = Runtime::Current();
const ImageHeader& image_header = runtime->GetHeap()->GetImageSpace()->GetImageHeader();
uint32_t image_oat_checksum = image_header.GetOatChecksum();
@@ -857,14 +872,14 @@
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, &dex_location_checksum);
if (oat_dex_file == NULL) {
- LOG(ERROR) << "oat file " << oat_file->GetLocation()
- << " does not contain contents for " << dex_location
- << " with checksum " << dex_location_checksum;
+ *error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
+ oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file->GetOatDexFiles();
for (size_t i = 0; i < oat_dex_files.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
- LOG(ERROR) << "oat file " << oat_file->GetLocation()
- << " contains contents for " << oat_dex_file->GetDexFileLocation();
+ *error_msg += StringPrintf("\noat file '%s' contains contents for '%s'",
+ oat_file->GetLocation().c_str(),
+ oat_dex_file->GetDexFileLocation().c_str());
}
return false;
}
@@ -875,116 +890,123 @@
}
if (!image_check) {
- std::string image_file(image_header.GetImageRoot(
- ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8());
- LOG(WARNING) << "oat file " << oat_file->GetLocation()
- << " mismatch (" << std::hex << oat_file->GetOatHeader().GetImageFileLocationOatChecksum()
- << ", " << oat_file->GetOatHeader().GetImageFileLocationOatDataBegin()
- << ") with " << image_file
- << " (" << image_oat_checksum << ", " << std::hex << image_oat_data_begin << ")";
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::String* oat_location = image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString();
+ std::string image_file(oat_location->ToModifiedUtf8());
+ *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d) with '%s' (0x%x, %d)",
+ oat_file->GetLocation().c_str(),
+ oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
+ oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
+ image_file.c_str(), image_oat_checksum, image_oat_data_begin);
}
if (!dex_check) {
- LOG(WARNING) << "oat file " << oat_file->GetLocation()
- << " mismatch (" << std::hex << oat_dex_file->GetDexFileLocationChecksum()
- << ") with " << dex_location
- << " (" << std::hex << dex_location_checksum << ")";
+ *error_msg = StringPrintf("oat file '%s' mismatch (0x%x) with '%s' (0x%x)",
+ oat_file->GetLocation().c_str(),
+ oat_dex_file->GetDexFileLocationChecksum(),
+ dex_location, dex_location_checksum);
}
return false;
}
-const DexFile* ClassLinker::VerifyAndOpenDexFileFromOatFile(const OatFile* oat_file,
- const std::string& dex_location,
- uint32_t dex_location_checksum) {
- bool verified = VerifyOatFileChecksums(oat_file, dex_location, dex_location_checksum);
- if (!verified) {
- delete oat_file;
- return NULL;
+const DexFile* ClassLinker::VerifyAndOpenDexFileFromOatFile(const std::string& oat_file_location,
+ const char* dex_location,
+ std::string* error_msg,
+ bool* open_failed) {
+ UniquePtr<const OatFile> oat_file(FindOatFileFromOatLocation(oat_file_location, error_msg));
+ if (oat_file.get() == nullptr) {
+ *open_failed = true;
+ return nullptr;
}
- RegisterOatFileLocked(*oat_file);
- return oat_file->GetOatDexFile(dex_location, &dex_location_checksum)->OpenDexFile();
+ *open_failed = false;
+ uint32_t dex_location_checksum;
+ if (!DexFile::GetChecksum(dex_location, &dex_location_checksum, error_msg)) {
+ // If no classes.dex is found in dex_location, it has been stripped or is corrupt; assume the
+ // oat is up-to-date. This is the common case in user builds for jars and apks in the /system
+ // directory.
+ const OatFile* opened_oat_file = oat_file.release();
+ opened_oat_file = RegisterOatFile(opened_oat_file);
+ const OatFile::OatDexFile* oat_dex_file = opened_oat_file->GetOatDexFile(dex_location, NULL);
+ if (oat_dex_file == nullptr) {
+ *error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
+ "dex file '%s': %s", oat_file_location.c_str(), dex_location,
+ error_msg->c_str());
+ return nullptr;
+ }
+ return oat_dex_file->OpenDexFile(error_msg);
+ }
+
+ bool verified = VerifyOatFileChecksums(oat_file.get(), dex_location, dex_location_checksum,
+ error_msg);
+ if (!verified) {
+ return nullptr;
+ }
+ const OatFile* opened_oat_file = oat_file.release();
+ opened_oat_file = RegisterOatFile(opened_oat_file);
+ return opened_oat_file->GetOatDexFile(dex_location,
+ &dex_location_checksum)->OpenDexFile(error_msg);
}
-const DexFile* ClassLinker::FindDexFileInOatFileFromDexLocation(const std::string& dex_location,
- uint32_t dex_location_checksum) {
- WriterMutexLock mu(Thread::Current(), dex_lock_);
-
+const DexFile* ClassLinker::FindDexFileInOatFileFromDexLocation(const char* dex_location,
+ uint32_t dex_location_checksum,
+ std::string* error_msg) {
const OatFile* open_oat_file = FindOpenedOatFileFromDexLocation(dex_location,
dex_location_checksum);
- if (open_oat_file != NULL) {
- return open_oat_file->GetOatDexFile(dex_location, &dex_location_checksum)->OpenDexFile();
+ if (open_oat_file != nullptr) {
+ const OatFile::OatDexFile* oat_dex_file = open_oat_file->GetOatDexFile(dex_location,
+ &dex_location_checksum);
+ return oat_dex_file->OpenDexFile(error_msg);
}
// Look for an existing file next to dex. for example, for
// /foo/bar/baz.jar, look for /foo/bar/baz.odex.
std::string odex_filename(OatFile::DexFilenameToOdexFilename(dex_location));
- UniquePtr<const OatFile> oat_file(FindOatFileFromOatLocationLocked(odex_filename));
- if (oat_file.get() != NULL) {
- uint32_t dex_location_checksum;
- if (!DexFile::GetChecksum(dex_location, &dex_location_checksum)) {
- // If no classes.dex found in dex_location, it has been stripped, assume oat is up-to-date.
- // This is the common case in user builds for jar's and apk's in the /system directory.
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, NULL);
- CHECK(oat_dex_file != NULL) << odex_filename << " " << dex_location;
- RegisterOatFileLocked(*oat_file);
- return oat_dex_file->OpenDexFile();
- }
- const DexFile* dex_file = VerifyAndOpenDexFileFromOatFile(oat_file.release(),
- dex_location,
- dex_location_checksum);
- if (dex_file != NULL) {
- return dex_file;
- }
+ bool open_failed;
+ const DexFile* dex_file = VerifyAndOpenDexFileFromOatFile(odex_filename, dex_location,
+ error_msg, &open_failed);
+ if (dex_file != nullptr) {
+ return dex_file;
}
- // Look for an existing file in the dalvik-cache, validating the result if found
- // not found in /foo/bar/baz.odex? try /data/dalvik-cache/foo@bar@baz.jar@classes.dex
+ std::string cache_error_msg;
std::string cache_location(GetDalvikCacheFilenameOrDie(dex_location));
- oat_file.reset(FindOatFileFromOatLocationLocked(cache_location));
- if (oat_file.get() != NULL) {
- uint32_t dex_location_checksum;
- if (!DexFile::GetChecksum(dex_location, &dex_location_checksum)) {
- LOG(WARNING) << "Failed to compute checksum: " << dex_location;
- return NULL;
- }
- const DexFile* dex_file = VerifyAndOpenDexFileFromOatFile(oat_file.release(),
- dex_location,
- dex_location_checksum);
- if (dex_file != NULL) {
- return dex_file;
- }
- if (TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) {
- PLOG(FATAL) << "Failed to remove obsolete oat file from " << cache_location;
- }
+ dex_file = VerifyAndOpenDexFileFromOatFile(cache_location, dex_location, &cache_error_msg,
+ &open_failed);
+ if (dex_file != nullptr) {
+ return dex_file;
}
- LOG(INFO) << "Failed to open oat file from " << odex_filename << " or " << cache_location << ".";
+ if (!open_failed && TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) {
+ PLOG(FATAL) << "Failed to remove obsolete oat file from " << cache_location;
+ }
+ VLOG(class_linker) << "Failed to open oat file from " << odex_filename
+ << " (error '" << *error_msg << "') or " << cache_location
+ << " (error '" << cache_error_msg << "').";
// Try to generate oat file if it wasn't found or was obsolete.
- std::string oat_cache_filename(GetDalvikCacheFilenameOrDie(dex_location));
- return FindOrCreateOatFileForDexLocationLocked(dex_location, dex_location_checksum, oat_cache_filename);
+ error_msg->clear();
+ return FindOrCreateOatFileForDexLocation(dex_location, dex_location_checksum,
+ cache_location.c_str(), error_msg);
}
const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i < oat_files_.size(); i++) {
const OatFile* oat_file = oat_files_[i];
- DCHECK(oat_file != NULL);
+ DCHECK(oat_file != nullptr);
if (oat_file->GetLocation() == oat_location) {
return oat_file;
}
}
- return NULL;
+ return nullptr;
}
-const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- return FindOatFileFromOatLocationLocked(oat_location);
-}
-
-const OatFile* ClassLinker::FindOatFileFromOatLocationLocked(const std::string& oat_location) {
+const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location,
+ std::string* error_msg) {
const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location);
- if (oat_file != NULL) {
+ if (oat_file != nullptr) {
return oat_file;
}
- oat_file = OatFile::Open(oat_location, oat_location, NULL, !Runtime::Current()->IsCompiler());
+ oat_file = OatFile::Open(oat_location, oat_location, NULL, !Runtime::Current()->IsCompiler(),
+ error_msg);
if (oat_file == NULL) {
return NULL;
}
@@ -1041,12 +1063,15 @@
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
SirtRef<mirror::DexCache> dex_cache(self, dex_caches->Get(i));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
- const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location, NULL);
+ const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
+ nullptr);
CHECK(oat_dex_file != NULL) << oat_file.GetLocation() << " " << dex_file_location;
- const DexFile* dex_file = oat_dex_file->OpenDexFile();
+ std::string error_msg;
+ const DexFile* dex_file = oat_dex_file->OpenDexFile(&error_msg);
if (dex_file == NULL) {
LOG(FATAL) << "Failed to open dex file " << dex_file_location
- << " from within oat file " << oat_file.GetLocation();
+ << " from within oat file " << oat_file.GetLocation()
+ << " error '" << error_msg << "'";
}
CHECK_EQ(dex_file->GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
@@ -1510,7 +1535,7 @@
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
CHECK(oat_file != NULL) << dex_file.GetLocation();
uint dex_location_checksum = dex_file.GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation(),
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
const OatFile::OatClass* oat_class = oat_dex_file->GetOatClass(class_def_idx);
@@ -2559,7 +2584,7 @@
CHECK(oat_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
uint dex_location_checksum = dex_file.GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation(),
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
uint16_t class_def_index = klass->GetDexClassDefIndex();
@@ -3911,6 +3936,11 @@
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.get(), this).GetDescriptor();
+ size_t previous_size = klass->GetObjectSize();
+ if (previous_size != 0) {
+ // Make sure that we didn't originally have an incorrect size.
+ CHECK_EQ(previous_size, size);
+ }
klass->SetObjectSize(size);
}
}
@@ -4025,7 +4055,7 @@
DCHECK(dex_cache != NULL);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
- if (resolved != NULL) {
+ if (resolved != NULL && !resolved->IsRuntimeMethod()) {
return resolved;
}
// Fail, get the declaring class.
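
RegisterOatFile now interns by location: registering an oat file whose location is already known
deletes the new instance and returns the previously registered one, which is why GetImageOatFile
can CHECK_EQ the returned pointer. A self-contained sketch of that contract, using simplified
stand-in types rather than the real OatFile and ClassLinker:

  #include <cassert>
  #include <string>
  #include <vector>

  // Hypothetical stand-ins modeling the intern behavior of RegisterOatFile.
  struct FakeOatFile {
    explicit FakeOatFile(const std::string& location) : location_(location) {}
    const std::string& GetLocation() const { return location_; }
    std::string location_;
  };

  class FakeRegistry {
   public:
    // Takes ownership. If an entry with the same location already exists, the new
    // instance is deleted and the canonical, already-registered one is returned.
    const FakeOatFile* Register(const FakeOatFile* oat_file) {
      for (size_t i = 0; i < oat_files_.size(); ++i) {
        if (oat_files_[i]->GetLocation() == oat_file->GetLocation()) {
          delete oat_file;
          return oat_files_[i];
        }
      }
      oat_files_.push_back(oat_file);
      return oat_file;
    }
   private:
    std::vector<const FakeOatFile*> oat_files_;
  };

  int main() {
    FakeRegistry registry;
    const FakeOatFile* first = registry.Register(new FakeOatFile("/data/dalvik-cache/a.oat"));
    const FakeOatFile* second = registry.Register(new FakeOatFile("/data/dalvik-cache/a.oat"));
    assert(first == second);  // Both callers see the same canonical instance.
    return 0;
  }
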
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 11ba78b..0bc1b5f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -215,7 +215,7 @@
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterOatFile(const OatFile& oat_file)
+ const OatFile* RegisterOatFile(const OatFile* oat_file)
LOCKS_EXCLUDED(dex_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
@@ -244,43 +244,37 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Generate an oat file from a dex file
- bool GenerateOatFile(const std::string& dex_filename,
+ bool GenerateOatFile(const char* dex_filename,
int oat_fd,
- const std::string& oat_cache_filename);
+ const char* oat_cache_filename)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
- const OatFile* FindOatFileFromOatLocation(const std::string& location)
+ const OatFile* FindOatFileFromOatLocation(const std::string& location,
+ std::string* error_msg)
LOCKS_EXCLUDED(dex_lock_);
- const OatFile* FindOatFileFromOatLocationLocked(const std::string& location)
- SHARED_LOCKS_REQUIRED(dex_lock_);
-
// Finds the oat file for a dex location, generating the oat file if
// it is missing or out of date. Returns the DexFile from within the
// created oat file.
- const DexFile* FindOrCreateOatFileForDexLocation(const std::string& dex_location,
+ const DexFile* FindOrCreateOatFileForDexLocation(const char* dex_location,
uint32_t dex_location_checksum,
- const std::string& oat_location)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const DexFile* FindOrCreateOatFileForDexLocationLocked(const std::string& dex_location,
- uint32_t dex_location_checksum,
- const std::string& oat_location)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* oat_location,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
// Find a DexFile within an OatFile given a DexFile location. Note
// that this returns null if the location checksum of the DexFile
// does not match the OatFile.
- const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location,
- uint32_t location_checksum)
- LOCKS_EXCLUDED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile* FindDexFileInOatFileFromDexLocation(const char* location,
+ uint32_t location_checksum,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
// Returns true if oat file contains the dex file with the given location and checksum.
static bool VerifyOatFileChecksums(const OatFile* oat_file,
- const std::string& dex_location,
- uint32_t dex_location_checksum)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ std::string* error_msg);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
@@ -430,8 +424,6 @@
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file) const SHARED_LOCKS_REQUIRED(dex_lock_);
- void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_);
bool InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_parents)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -493,22 +485,22 @@
const OatFile* FindOpenedOatFileForDexFile(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_location,
+ const OatFile* FindOpenedOatFileFromDexLocation(const char* dex_location,
uint32_t dex_location_checksum)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_);
+ LOCKS_EXCLUDED(dex_lock_);
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
- SHARED_LOCKS_REQUIRED(dex_lock_);
- const DexFile* FindDexFileInOatLocation(const std::string& dex_location,
+ LOCKS_EXCLUDED(dex_lock_);
+ const DexFile* FindDexFileInOatLocation(const char* dex_location,
uint32_t dex_location_checksum,
- const std::string& oat_location)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const char* oat_location,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(dex_lock_);
- const DexFile* VerifyAndOpenDexFileFromOatFile(const OatFile* oat_file,
- const std::string& dex_location,
- uint32_t dex_location_checksum)
- EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const DexFile* VerifyAndOpenDexFileFromOatFile(const std::string& oat_file_location,
+ const char* dex_location,
+ std::string* error_msg,
+ bool* open_failed)
+ LOCKS_EXCLUDED(dex_lock_);
mirror::ArtMethod* CreateProxyConstructor(Thread* self, SirtRef<mirror::Class>& klass,
mirror::Class* proxy_class)
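
The declarations above switch the lookup paths to report failures through a std::string* error_msg
out-parameter instead of logging directly. A sketch of the resulting call pattern (fragment only;
class_linker, dex_location and checksum are assumed to be in scope):

  std::string error_msg;
  const DexFile* dex_file =
      class_linker->FindDexFileInOatFileFromDexLocation(dex_location, checksum, &error_msg);
  if (dex_file == nullptr) {
    LOG(WARNING) << "Could not open dex file '" << dex_location << "': " << error_msg;
  }
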
diff --git a/runtime/common_test.h b/runtime/common_test.h
index fe54d03..899eab1 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -282,9 +282,12 @@
int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
ASSERT_EQ(mkdir_result, 0);
- java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName(), GetLibCoreDexFileName());
+ std::string error_msg;
+ java_lang_dex_file_ = DexFile::Open(GetLibCoreDexFileName().c_str(),
+ GetLibCoreDexFileName().c_str(), &error_msg);
if (java_lang_dex_file_ == NULL) {
- LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "'\n";
+ LOG(FATAL) << "Could not open .dex file '" << GetLibCoreDexFileName() << "': "
+ << error_msg << "\n";
}
boot_class_path_.push_back(java_lang_dex_file_);
@@ -423,8 +426,9 @@
filename += "art-test-dex-";
filename += name;
filename += ".jar";
- const DexFile* dex_file = DexFile::Open(filename, filename);
- CHECK(dex_file != NULL) << "Failed to open " << filename;
+ std::string error_msg;
+ const DexFile* dex_file = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg);
+ CHECK(dex_file != NULL) << "Failed to open '" << filename << "': " << error_msg;
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
CHECK(dex_file->IsReadOnly());
opened_dex_files_.push_back(dex_file);
@@ -498,10 +502,12 @@
void ReserveImageSpace() {
// Reserve where the image will be loaded up front so that other parts of test set up don't
// accidentally end up colliding with the fixed memory address when we need to load the image.
+ std::string error_msg;
image_reservation_.reset(MemMap::MapAnonymous("image reservation",
reinterpret_cast<byte*>(ART_BASE_ADDRESS),
(size_t)100 * 1024 * 1024, // 100MB
- PROT_NONE));
+ PROT_NONE, &error_msg));
+ CHECK(image_reservation_.get() != nullptr) << error_msg;
}
void UnreserveImageSpace() {
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 189e3ed..0419dab 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -230,6 +230,15 @@
va_end(args);
}
+// IOException
+
+void ThrowIOException(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ThrowException(NULL, "Ljava/io/IOException;", NULL, fmt, &args);
+ va_end(args);
+}
+
// LinkageError
void ThrowLinkageError(const mirror::Class* referrer, const char* fmt, ...) {
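
ThrowIOException follows the existing printf-style helpers and, per the annotation added to
common_throws.h below, must be called with the shared mutator lock held. A sketch of a call site
modeled on the GenerateOatFile change above (the wrapper function name is hypothetical):

  #include "common_throws.h"
  #include "scoped_thread_state_change.h"
  #include "thread.h"

  namespace art {

  // Hypothetical helper: ScopedObjectAccess acquires the shared mutator lock
  // that ThrowIOException requires before raising the Java-level IOException.
  static void ReportDex2OatFailure(const char* dex2oat, const char* dex_filename) {
    ScopedObjectAccess soa(Thread::Current());
    ThrowIOException("Failed to create oat file. %s failed with dex-file '%s'",
                     dex2oat, dex_filename);
  }

  }  // namespace art
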
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 1d77e2d..3164f30 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -22,10 +22,10 @@
namespace art {
namespace mirror {
-class ArtField;
-class ArtMethod;
-class Class;
-class Object;
+ class ArtField;
+ class ArtMethod;
+ class Class;
+ class Object;
} // namespace mirror
class Signature;
class StringPiece;
@@ -34,102 +34,110 @@
// AbstractMethodError
void ThrowAbstractMethodError(const mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ArithmeticException
-void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ArrayStoreException
void ThrowArrayStoreException(const mirror::Class* element_class,
const mirror::Class* array_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
-void ThrowClassCircularityError(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+void ThrowClassCircularityError(mirror::Class* c)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
void ThrowClassCastException(const mirror::Class* dest_type, const mirror::Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
void ThrowClassFormatError(const mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
const mirror::ArtMethod* caller,
const mirror::ArtMethod* called,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::ArtField* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(const mirror::ArtMethod* referrer,
mirror::ArtField* accessed)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
mirror::ArtMethod* method,
const mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(const mirror::ArtMethod* interface_method,
mirror::Object* this_object,
const mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorField(const mirror::ArtField* resolved_field, bool is_static,
const mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeError(const mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+
+// IOException
+
+void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// LinkageError
void ThrowLinkageError(const mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// NegativeArraySizeException
-void ThrowNegativeArraySizeException(int size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void ThrowNegativeArraySizeException(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+void ThrowNegativeArraySizeException(int size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowNegativeArraySizeException(const char* msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchFieldError
@@ -142,45 +150,45 @@
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
const Signature& signature)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchMethodError(uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// NullPointerException
void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location,
mirror::ArtField* field,
bool is_read)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
uint32_t method_idx,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
mirror::ArtMethod* method,
InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
void ThrowRuntimeException(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// VerifyError
void ThrowVerifyError(const mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
} // namespace art
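
Note: COLD_ATTR is applied to every throw helper above but is not defined in this change; it is assumed to come from ART's base macros header. A minimal sketch, assuming it wraps the GCC/Clang cold attribute, of what such a macro typically expands to and how an annotated helper is declared:

    // Assumed definition; the real one is not shown in this diff.
    #if defined(__GNUC__) || defined(__clang__)
    #define COLD_ATTR __attribute__((cold))  // hint: rarely executed, keep out of hot code paths
    #else
    #define COLD_ATTR
    #endif

    // Hypothetical throw helper: the compiler may place its body in a cold section
    // and optimize callers for the non-throwing path.
    void ThrowExampleError(const char* msg) COLD_ATTR;
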
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index ac133a3..a0f5601 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -62,72 +62,77 @@
reinterpret_cast<const DexFile::ClassDef*>(NULL));
}
-int OpenAndReadMagic(const std::string& filename, uint32_t* magic) {
+static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
CHECK(magic != NULL);
- int fd = open(filename.c_str(), O_RDONLY, 0);
+ int fd = open(filename, O_RDONLY, 0);
if (fd == -1) {
- PLOG(WARNING) << "Unable to open '" << filename << "'";
+ *error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
return -1;
}
int n = TEMP_FAILURE_RETRY(read(fd, magic, sizeof(*magic)));
if (n != sizeof(*magic)) {
- PLOG(ERROR) << "Failed to find magic in '" << filename << "'";
+ *error_msg = StringPrintf("Failed to find magic in '%s'", filename);
return -1;
}
if (lseek(fd, 0, SEEK_SET) != 0) {
- PLOG(ERROR) << "Failed to seek to beginning of file '" << filename << "'";
+ *error_msg = StringPrintf("Failed to seek to beginning of file '%s' : %s", filename,
+ strerror(errno));
return -1;
}
return fd;
}
-bool DexFile::GetChecksum(const std::string& filename, uint32_t* checksum) {
+bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
CHECK(checksum != NULL);
uint32_t magic;
- int fd = OpenAndReadMagic(filename, &magic);
+ int fd = OpenAndReadMagic(filename, &magic, error_msg);
if (fd == -1) {
+ DCHECK(!error_msg->empty());
return false;
}
if (IsZipMagic(magic)) {
- UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd));
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, filename, error_msg));
if (zip_archive.get() == NULL) {
+ *error_msg = StringPrintf("Failed to open zip archive '%s'", filename);
return false;
}
UniquePtr<ZipEntry> zip_entry(zip_archive->Find(kClassesDex));
if (zip_entry.get() == NULL) {
- LOG(ERROR) << "Zip archive '" << filename << "' doesn't contain " << kClassesDex;
+ *error_msg = StringPrintf("Zip archive '%s' doesn\'t contain %s", filename, kClassesDex);
return false;
}
*checksum = zip_entry->GetCrc32();
return true;
}
if (IsDexMagic(magic)) {
- UniquePtr<const DexFile> dex_file(DexFile::OpenFile(fd, filename, false));
+ UniquePtr<const DexFile> dex_file(DexFile::OpenFile(fd, filename, false, error_msg));
if (dex_file.get() == NULL) {
return false;
}
*checksum = dex_file->GetHeader().checksum_;
return true;
}
- LOG(ERROR) << "Expected valid zip or dex file: " << filename;
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
return false;
}
-const DexFile* DexFile::Open(const std::string& filename,
- const std::string& location) {
+const DexFile* DexFile::Open(const char* filename,
+ const char* location,
+ std::string* error_msg) {
uint32_t magic;
- int fd = OpenAndReadMagic(filename, &magic);
+ int fd = OpenAndReadMagic(filename, &magic, error_msg);
if (fd == -1) {
+ DCHECK(!error_msg->empty());
return NULL;
}
if (IsZipMagic(magic)) {
- return DexFile::OpenZip(fd, location);
+ return DexFile::OpenZip(fd, location, error_msg);
}
if (IsDexMagic(magic)) {
- return DexFile::OpenFile(fd, location, true);
+ return DexFile::OpenFile(fd, location, true, error_msg);
}
- LOG(ERROR) << "Expected valid zip or dex file: " << filename;
- return NULL;
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return nullptr;
}
int DexFile::GetPermissions() const {
@@ -160,46 +165,48 @@
}
}
-const DexFile* DexFile::OpenFile(int fd,
- const std::string& location,
- bool verify) {
- CHECK(!location.empty());
+const DexFile* DexFile::OpenFile(int fd, const char* location, bool verify,
+ std::string* error_msg) {
+ CHECK(location != nullptr);
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
if (fstat(fd, &sbuf) == -1) {
- PLOG(ERROR) << "fstat \"" << location << "\" failed";
+ *error_msg = StringPrintf("DexFile: fstat \'%s\' failed: %s", location, strerror(errno));
close(fd);
- return NULL;
+ return nullptr;
}
if (S_ISDIR(sbuf.st_mode)) {
- LOG(ERROR) << "attempt to mmap directory \"" << location << "\"";
- return NULL;
+ *error_msg = StringPrintf("Attempt to mmap directory '%s'", location);
+ return nullptr;
}
size_t length = sbuf.st_size;
- UniquePtr<MemMap> map(MemMap::MapFile(length, PROT_READ, MAP_PRIVATE, fd, 0));
- if (map.get() == NULL) {
- LOG(ERROR) << "mmap \"" << location << "\" failed";
+ UniquePtr<MemMap> map(MemMap::MapFile(length, PROT_READ, MAP_PRIVATE, fd, 0, location,
+ error_msg));
+ if (map.get() == nullptr) {
+ DCHECK(!error_msg->empty());
close(fd);
- return NULL;
+ return nullptr;
}
close(fd);
if (map->Size() < sizeof(DexFile::Header)) {
- LOG(ERROR) << "Failed to open dex file '" << location << "' that is too short to have a header";
- return NULL;
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file \'%s\' that is too short to have a header", location);
+ return nullptr;
}
const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
- const DexFile* dex_file = OpenMemory(location, dex_header->checksum_, map.release());
- if (dex_file == NULL) {
- LOG(ERROR) << "Failed to open dex file '" << location << "' from memory";
- return NULL;
+ const DexFile* dex_file = OpenMemory(location, dex_header->checksum_, map.release(), error_msg);
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location,
+ error_msg->c_str());
+ return nullptr;
}
- if (verify && !DexFileVerifier::Verify(dex_file, dex_file->Begin(), dex_file->Size())) {
- LOG(ERROR) << "Failed to verify dex file '" << location << "'";
- return NULL;
+ if (verify && !DexFileVerifier::Verify(dex_file, dex_file->Begin(), dex_file->Size(), location,
+ error_msg)) {
+ return nullptr;
}
return dex_file;
@@ -207,49 +214,55 @@
const char* DexFile::kClassesDex = "classes.dex";
-const DexFile* DexFile::OpenZip(int fd, const std::string& location) {
- UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd));
- if (zip_archive.get() == NULL) {
- LOG(ERROR) << "Failed to open " << location << " when looking for classes.dex";
- return NULL;
+const DexFile* DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg) {
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
}
- return DexFile::Open(*zip_archive.get(), location);
+ return DexFile::Open(*zip_archive.get(), location, error_msg);
}
const DexFile* DexFile::OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map) {
+ MemMap* mem_map,
+ std::string* error_msg) {
return OpenMemory(mem_map->Begin(),
mem_map->Size(),
location,
location_checksum,
- mem_map);
+ mem_map,
+ error_msg);
}
-const DexFile* DexFile::Open(const ZipArchive& zip_archive, const std::string& location) {
+const DexFile* DexFile::Open(const ZipArchive& zip_archive, const std::string& location,
+ std::string* error_msg) {
CHECK(!location.empty());
UniquePtr<ZipEntry> zip_entry(zip_archive.Find(kClassesDex));
if (zip_entry.get() == NULL) {
- LOG(ERROR) << "Failed to find classes.dex within '" << location << "'";
- return NULL;
+ *error_msg = StringPrintf("Failed to find classes.dex within '%s'", location.c_str());
+ return nullptr;
}
- UniquePtr<MemMap> map(zip_entry->ExtractToMemMap(kClassesDex));
+ UniquePtr<MemMap> map(zip_entry->ExtractToMemMap(kClassesDex, error_msg));
if (map.get() == NULL) {
- LOG(ERROR) << "Failed to extract '" << kClassesDex << "' from '" << location << "'";
- return NULL;
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", kClassesDex, location.c_str(),
+ error_msg->c_str());
+ return nullptr;
}
- UniquePtr<const DexFile> dex_file(OpenMemory(location, zip_entry->GetCrc32(), map.release()));
- if (dex_file.get() == NULL) {
- LOG(ERROR) << "Failed to open dex file '" << location << "' from memory";
- return NULL;
+ UniquePtr<const DexFile> dex_file(OpenMemory(location, zip_entry->GetCrc32(), map.release(),
+ error_msg));
+ if (dex_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
+ error_msg->c_str());
+ return nullptr;
}
- if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size())) {
- LOG(ERROR) << "Failed to verify dex file '" << location << "'";
- return NULL;
+ if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
+ location.c_str(), error_msg)) {
+ return nullptr;
}
if (!dex_file->DisableWrite()) {
- LOG(ERROR) << "Failed to make dex file read only '" << location << "'";
- return NULL;
+ *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
+ return nullptr;
}
CHECK(dex_file->IsReadOnly()) << location;
return dex_file.release();
@@ -259,11 +272,11 @@
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map) {
+ MemMap* mem_map, std::string* error_msg) {
CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
UniquePtr<DexFile> dex_file(new DexFile(base, size, location, location_checksum, mem_map));
- if (!dex_file->Init()) {
- return NULL;
+ if (!dex_file->Init(error_msg)) {
+ return nullptr;
} else {
return dex_file.release();
}
@@ -276,9 +289,9 @@
// the global reference table is otherwise empty!
}
-bool DexFile::Init() {
+bool DexFile::Init(std::string* error_msg) {
InitMembers();
- if (!CheckMagicAndVersion()) {
+ if (!CheckMagicAndVersion(error_msg)) {
return false;
}
return true;
@@ -296,22 +309,26 @@
class_defs_ = reinterpret_cast<const ClassDef*>(b + h->class_defs_off_);
}
-bool DexFile::CheckMagicAndVersion() const {
+bool DexFile::CheckMagicAndVersion(std::string* error_msg) const {
CHECK(header_->magic_ != NULL) << GetLocation();
if (!IsMagicValid(header_->magic_)) {
- LOG(ERROR) << "Unrecognized magic number in " << GetLocation() << ":"
+ std::ostringstream oss;
+ oss << "Unrecognized magic number in " << GetLocation() << ":"
<< " " << header_->magic_[0]
<< " " << header_->magic_[1]
<< " " << header_->magic_[2]
<< " " << header_->magic_[3];
+ *error_msg = oss.str();
return false;
}
if (!IsVersionValid(header_->magic_)) {
- LOG(ERROR) << "Unrecognized version number in " << GetLocation() << ":"
+ std::ostringstream oss;
+ oss << "Unrecognized version number in " << GetLocation() << ":"
<< " " << header_->magic_[4]
<< " " << header_->magic_[5]
<< " " << header_->magic_[6]
<< " " << header_->magic_[7];
+ *error_msg = oss.str();
return false;
}
return true;
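
The dex_file.cc changes above replace LOG(ERROR)/PLOG in the open paths with an error_msg out-parameter, leaving the caller to decide how to report failures. A minimal usage sketch against the new DexFile::Open(const char*, const char*, std::string*) signature; the file path is hypothetical and the assumed includes are <string>, "dex_file.h" and ART's logging macros:

    std::string error_msg;
    const DexFile* dex_file = DexFile::Open("/data/app/example/classes.dex",
                                            "/data/app/example/classes.dex",
                                            &error_msg);
    if (dex_file == nullptr) {
      // The failure text (e.g. "Unable to open '...' : No such file or directory")
      // is surfaced here instead of being logged inside DexFile.
      LOG(WARNING) << "Could not open dex file: " << error_msg;
    }
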
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 12e8440..035a691 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -350,22 +350,22 @@
// For .dex files, this is the header checksum.
// For zip files, this is the classes.dex zip entry CRC32 checksum.
// Return true if the checksum could be found, false otherwise.
- static bool GetChecksum(const std::string& filename, uint32_t* checksum)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static bool GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg);
// Opens .dex file, guessing the container format based on file extension
- static const DexFile* Open(const std::string& filename,
- const std::string& location);
+ static const DexFile* Open(const char* filename, const char* location, std::string* error_msg);
// Opens .dex file, backed by existing memory
static const DexFile* Open(const uint8_t* base, size_t size,
const std::string& location,
- uint32_t location_checksum) {
- return OpenMemory(base, size, location, location_checksum, NULL);
+ uint32_t location_checksum,
+ std::string* error_msg) {
+ return OpenMemory(base, size, location, location_checksum, NULL, error_msg);
}
// Opens .dex file from the classes.dex in a zip archive
- static const DexFile* Open(const ZipArchive& zip_archive, const std::string& location);
+ static const DexFile* Open(const ZipArchive& zip_archive, const std::string& location,
+ std::string* error_msg);
// Closes a .dex file.
virtual ~DexFile();
@@ -820,24 +820,24 @@
private:
// Opens a .dex file
- static const DexFile* OpenFile(int fd,
- const std::string& location,
- bool verify);
+ static const DexFile* OpenFile(int fd, const char* location, bool verify, std::string* error_msg);
// Opens a dex file from within a .jar, .zip, or .apk file
- static const DexFile* OpenZip(int fd, const std::string& location);
+ static const DexFile* OpenZip(int fd, const std::string& location, std::string* error_msg);
// Opens a .dex file at the given address backed by a MemMap
static const DexFile* OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map);
+ MemMap* mem_map,
+ std::string* error_msg);
// Opens a .dex file at the given address, optionally backed by a MemMap
static const DexFile* OpenMemory(const byte* dex_file,
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map);
+ MemMap* mem_map,
+ std::string* error_msg);
DexFile(const byte* base, size_t size,
const std::string& location,
@@ -861,13 +861,13 @@
}
// Top-level initializer that calls other Init methods.
- bool Init();
+ bool Init(std::string* error_msg);
// Caches pointers into the various file sections.
void InitMembers();
// Returns true if the header magic and version numbers are of the expected values.
- bool CheckMagicAndVersion() const;
+ bool CheckMagicAndVersion(std::string* error_msg) const;
void DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
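
The header now threads std::string* error_msg through every Open/OpenMemory overload. A sketch of opening a dex image that is already resident in memory via the inline overload above; base, size and location_checksum are assumed to be supplied by the caller (for example from a zip entry extracted earlier):

    std::string error_msg;
    const DexFile* dex_file = DexFile::Open(base, size, "embedded-classes.dex",
                                            location_checksum, &error_msg);
    if (dex_file == nullptr) {
      // e.g. "Unrecognized magic number in embedded-classes.dex: ..." from CheckMagicAndVersion().
      LOG(ERROR) << error_msg;
    }
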
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 7575d4a..543a7b0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -56,7 +56,7 @@
"AAMgAAACAAAAiAIAAAQgAAADAAAAlAIAAAAgAAACAAAAqwIAAAAQAAABAAAAxAIAAA==";
static const DexFile* OpenDexFileBase64(const char* base64,
- const std::string& location) {
+ const char* location) {
// decode base64
CHECK(base64 != NULL);
size_t length;
@@ -64,7 +64,7 @@
CHECK(dex_bytes.get() != NULL);
// write to provided file
- UniquePtr<File> file(OS::CreateEmptyFile(location.c_str()));
+ UniquePtr<File> file(OS::CreateEmptyFile(location));
CHECK(file.get() != NULL);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
@@ -73,8 +73,9 @@
// read dex file
ScopedObjectAccess soa(Thread::Current());
- const DexFile* dex_file = DexFile::Open(location, location);
- CHECK(dex_file != NULL);
+ std::string error_msg;
+ const DexFile* dex_file = DexFile::Open(location, location, &error_msg);
+ CHECK(dex_file != nullptr) << error_msg;
EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
EXPECT_TRUE(dex_file->IsReadOnly());
return dex_file;
@@ -82,7 +83,7 @@
TEST_F(DexFileTest, Header) {
ScratchFile tmp;
- UniquePtr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename()));
+ UniquePtr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
ASSERT_TRUE(raw.get() != NULL);
const DexFile::Header& header = raw->GetHeader();
@@ -120,7 +121,9 @@
TEST_F(DexFileTest, GetChecksum) {
uint32_t checksum;
ScopedObjectAccess soa(Thread::Current());
- EXPECT_TRUE(DexFile::GetChecksum(GetLibCoreDexFileName(), &checksum));
+ std::string error_msg;
+ EXPECT_TRUE(DexFile::GetChecksum(GetLibCoreDexFileName().c_str(), &checksum, &error_msg))
+ << error_msg;
EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksum);
}
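
GetChecksum() now takes a const char* filename plus an error_msg out-parameter, matching the test above. A sketch of a caller comparing an on-disk dex/zip against an expected value; the path and expected_checksum are hypothetical:

    uint32_t actual_checksum = 0;
    std::string error_msg;
    if (!DexFile::GetChecksum("/system/framework/core.jar", &actual_checksum, &error_msg)) {
      LOG(ERROR) << "GetChecksum failed: " << error_msg;
    } else if (actual_checksum != expected_checksum) {
      LOG(WARNING) << "Checksum mismatch for core.jar";
    }
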
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 7dc2b31..56bf21d 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -65,12 +65,22 @@
return true;
}
-static bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor,
- bool is_return_type) {
+bool DexFileVerifier::Verify(const DexFile* dex_file, const byte* begin, size_t size,
+ const char* location, std::string* error_msg) {
+ UniquePtr<DexFileVerifier> verifier(new DexFileVerifier(dex_file, begin, size, location));
+ if (!verifier->Verify()) {
+ *error_msg = verifier->FailureReason();
+ return false;
+ }
+ return true;
+}
+
+bool DexFileVerifier::CheckShortyDescriptorMatch(char shorty_char, const char* descriptor,
+ bool is_return_type) {
switch (shorty_char) {
case 'V':
- if (!is_return_type) {
- LOG(ERROR) << "Invalid use of void";
+ if (UNLIKELY(!is_return_type)) {
+ ErrorStringPrintf("Invalid use of void");
return false;
}
// Intentional fallthrough.
@@ -82,62 +92,58 @@
case 'J':
case 'S':
case 'Z':
- if ((descriptor[0] != shorty_char) || (descriptor[1] != '\0')) {
- LOG(ERROR) << StringPrintf("Shorty vs. primitive type mismatch: '%c', '%s'", shorty_char, descriptor);
+ if (UNLIKELY((descriptor[0] != shorty_char) || (descriptor[1] != '\0'))) {
+ ErrorStringPrintf("Shorty vs. primitive type mismatch: '%c', '%s'",
+ shorty_char, descriptor);
return false;
}
break;
case 'L':
- if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
- LOG(ERROR) << StringPrintf("Shorty vs. type mismatch: '%c', '%s'", shorty_char, descriptor);
+ if (UNLIKELY((descriptor[0] != 'L') && (descriptor[0] != '['))) {
+ ErrorStringPrintf("Shorty vs. type mismatch: '%c', '%s'", shorty_char, descriptor);
return false;
}
break;
default:
- LOG(ERROR) << "Bad shorty character: '" << shorty_char << "'";
+ ErrorStringPrintf("Bad shorty character: '%c'", shorty_char);
return false;
}
return true;
}
-bool DexFileVerifier::Verify(const DexFile* dex_file, const byte* begin, size_t size) {
- UniquePtr<DexFileVerifier> verifier(new DexFileVerifier(dex_file, begin, size));
- return verifier->Verify();
-}
-
-bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) const {
+bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) {
uint32_t range_start = reinterpret_cast<uint32_t>(start);
uint32_t range_end = reinterpret_cast<uint32_t>(end);
uint32_t file_start = reinterpret_cast<uint32_t>(begin_);
uint32_t file_end = file_start + size_;
- if ((range_start < file_start) || (range_start > file_end) ||
- (range_end < file_start) || (range_end > file_end)) {
- LOG(ERROR) << StringPrintf("Bad range for %s: %x to %x", label,
- range_start - file_start, range_end - file_start);
+ if (UNLIKELY((range_start < file_start) || (range_start > file_end) ||
+ (range_end < file_start) || (range_end > file_end))) {
+ ErrorStringPrintf("Bad range for %s: %x to %x", label,
+ range_start - file_start, range_end - file_start);
return false;
}
return true;
}
bool DexFileVerifier::CheckListSize(const void* start, uint32_t count,
- uint32_t element_size, const char* label) const {
+ uint32_t element_size, const char* label) {
const byte* list_start = reinterpret_cast<const byte*>(start);
return CheckPointerRange(list_start, list_start + (count * element_size), label);
}
-bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) const {
- if (field >= limit) {
- LOG(ERROR) << StringPrintf("Bad index for %s: %x >= %x", label, field, limit);
+bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) {
+ if (UNLIKELY(field >= limit)) {
+ ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
return false;
}
return true;
}
-bool DexFileVerifier::CheckHeader() const {
+bool DexFileVerifier::CheckHeader() {
// Check file size from the header.
uint32_t expected_size = header_->file_size_;
if (size_ != expected_size) {
- LOG(ERROR) << "Bad file size (" << size_ << ", expected " << expected_size << ")";
+ ErrorStringPrintf("Bad file size (%zd, expected %ud)", size_, expected_size);
return false;
}
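
With this change the verifier records its first failure through ErrorStringPrintf()/failure_reason_, and the static Verify() entry point introduced above copies it into the caller's error_msg. The caller-side pattern, mirroring the call in DexFile::OpenFile() earlier in this diff (sketch only):

    std::string error_msg;
    if (!DexFileVerifier::Verify(dex_file, dex_file->Begin(), dex_file->Size(),
                                 location, &error_msg)) {
      // error_msg holds the verifier's FailureReason(), e.g.
      // "Bad checksum (xxxxxxxx, expected yyyyyyyy)".
      return nullptr;
    }
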
@@ -147,25 +153,25 @@
const byte* non_sum_ptr = reinterpret_cast<const byte*>(header_) + non_sum;
adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
if (adler_checksum != header_->checksum_) {
- LOG(ERROR) << StringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
+ ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
return false;
}
// Check the contents of the header.
if (header_->endian_tag_ != DexFile::kDexEndianConstant) {
- LOG(ERROR) << StringPrintf("Unexpected endian_tag: %x", header_->endian_tag_);
+ ErrorStringPrintf("Unexpected endian_tag: %x", header_->endian_tag_);
return false;
}
if (header_->header_size_ != sizeof(DexFile::Header)) {
- LOG(ERROR) << "Bad header size: " << header_->header_size_;
+ ErrorStringPrintf("Bad header size: %ud", header_->header_size_);
return false;
}
return true;
}
-bool DexFileVerifier::CheckMap() const {
+bool DexFileVerifier::CheckMap() {
const DexFile::MapList* map = reinterpret_cast<const DexFile::MapList*>(begin_ + header_->map_off_);
const DexFile::MapItem* item = map->list_;
@@ -182,19 +188,20 @@
// Check the items listed in the map.
for (uint32_t i = 0; i < count; i++) {
- if (last_offset >= item->offset_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out of order map item: %x then %x", last_offset, item->offset_);
+ if (UNLIKELY(last_offset >= item->offset_ && i != 0)) {
+ ErrorStringPrintf("Out of order map item: %x then %x", last_offset, item->offset_);
return false;
}
- if (item->offset_ >= header_->file_size_) {
- LOG(ERROR) << StringPrintf("Map item after end of file: %x, size %x", item->offset_, header_->file_size_);
+ if (UNLIKELY(item->offset_ >= header_->file_size_)) {
+ ErrorStringPrintf("Map item after end of file: %x, size %x",
+ item->offset_, header_->file_size_);
return false;
}
if (IsDataSectionType(item->type_)) {
uint32_t icount = item->size_;
- if (icount > data_items_left) {
- LOG(ERROR) << "Too many items in data section: " << data_item_count + icount;
+ if (UNLIKELY(icount > data_items_left)) {
+ ErrorStringPrintf("Too many items in data section: %ud", data_item_count + icount);
return false;
}
data_items_left -= icount;
@@ -203,13 +210,13 @@
uint32_t bit = MapTypeToBitMask(item->type_);
- if (bit == 0) {
- LOG(ERROR) << StringPrintf("Unknown map section type %x", item->type_);
+ if (UNLIKELY(bit == 0)) {
+ ErrorStringPrintf("Unknown map section type %x", item->type_);
return false;
}
- if ((used_bits & bit) != 0) {
- LOG(ERROR) << StringPrintf("Duplicate map section of type %x", item->type_);
+ if (UNLIKELY((used_bits & bit) != 0)) {
+ ErrorStringPrintf("Duplicate map section of type %x", item->type_);
return false;
}
@@ -219,63 +226,59 @@
}
// Check for missing sections in the map.
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeHeaderItem)) == 0) {
- LOG(ERROR) << "Map is missing header entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeHeaderItem)) == 0)) {
+ ErrorStringPrintf("Map is missing header entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeMapList)) == 0) {
- LOG(ERROR) << "Map is missing map_list entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeMapList)) == 0)) {
+ ErrorStringPrintf("Map is missing map_list entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeStringIdItem)) == 0 &&
- ((header_->string_ids_off_ != 0) || (header_->string_ids_size_ != 0))) {
- LOG(ERROR) << "Map is missing string_ids entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeStringIdItem)) == 0 &&
+ ((header_->string_ids_off_ != 0) || (header_->string_ids_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing string_ids entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeTypeIdItem)) == 0 &&
- ((header_->type_ids_off_ != 0) || (header_->type_ids_size_ != 0))) {
- LOG(ERROR) << "Map is missing type_ids entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeTypeIdItem)) == 0 &&
+ ((header_->type_ids_off_ != 0) || (header_->type_ids_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing type_ids entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeProtoIdItem)) == 0 &&
- ((header_->proto_ids_off_ != 0) || (header_->proto_ids_size_ != 0))) {
- LOG(ERROR) << "Map is missing proto_ids entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeProtoIdItem)) == 0 &&
+ ((header_->proto_ids_off_ != 0) || (header_->proto_ids_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing proto_ids entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeFieldIdItem)) == 0 &&
- ((header_->field_ids_off_ != 0) || (header_->field_ids_size_ != 0))) {
- LOG(ERROR) << "Map is missing field_ids entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeFieldIdItem)) == 0 &&
+ ((header_->field_ids_off_ != 0) || (header_->field_ids_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing field_ids entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeMethodIdItem)) == 0 &&
- ((header_->method_ids_off_ != 0) || (header_->method_ids_size_ != 0))) {
- LOG(ERROR) << "Map is missing method_ids entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeMethodIdItem)) == 0 &&
+ ((header_->method_ids_off_ != 0) || (header_->method_ids_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing method_ids entry");
return false;
}
- if ((used_bits & MapTypeToBitMask(DexFile::kDexTypeClassDefItem)) == 0 &&
- ((header_->class_defs_off_ != 0) || (header_->class_defs_size_ != 0))) {
- LOG(ERROR) << "Map is missing class_defs entry";
+ if (UNLIKELY((used_bits & MapTypeToBitMask(DexFile::kDexTypeClassDefItem)) == 0 &&
+ ((header_->class_defs_off_ != 0) || (header_->class_defs_size_ != 0)))) {
+ ErrorStringPrintf("Map is missing class_defs entry");
return false;
}
-
return true;
}
uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
uint32_t result = 0;
- if (!CheckPointerRange(ptr_, ptr_ + size, "encoded_value")) {
- return 0;
+ if (LIKELY(CheckPointerRange(ptr_, ptr_ + size, "encoded_value"))) {
+ for (uint32_t i = 0; i < size; i++) {
+ result |= ((uint32_t) *(ptr_++)) << (i * 8);
+ }
}
-
- for (uint32_t i = 0; i < size; i++) {
- result |= ((uint32_t) *(ptr_++)) << (i * 8);
- }
-
return result;
}
bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
- uint32_t* handler_offsets, uint32_t handlers_size) {
+ uint32_t* handler_offsets, uint32_t handlers_size) {
const byte* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
for (uint32_t i = 0; i < handlers_size; i++) {
@@ -283,8 +286,8 @@
uint32_t offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(handlers_base);
int32_t size = DecodeSignedLeb128(&ptr_);
- if ((size < -65536) || (size > 65536)) {
- LOG(ERROR) << "Invalid exception handler size: " << size;
+ if (UNLIKELY((size < -65536) || (size > 65536))) {
+ ErrorStringPrintf("Invalid exception handler size: %d", size);
return false;
}
@@ -304,16 +307,16 @@
}
uint32_t addr = DecodeUnsignedLeb128(&ptr_);
- if (addr >= code_item->insns_size_in_code_units_) {
- LOG(ERROR) << StringPrintf("Invalid handler addr: %x", addr);
+ if (UNLIKELY(addr >= code_item->insns_size_in_code_units_)) {
+ ErrorStringPrintf("Invalid handler addr: %x", addr);
return false;
}
}
if (catch_all) {
uint32_t addr = DecodeUnsignedLeb128(&ptr_);
- if (addr >= code_item->insns_size_in_code_units_) {
- LOG(ERROR) << StringPrintf("Invalid handler catch_all_addr: %x", addr);
+ if (UNLIKELY(addr >= code_item->insns_size_in_code_units_)) {
+ ErrorStringPrintf("Invalid handler catch_all_addr: %x", addr);
return false;
}
}
@@ -323,21 +326,21 @@
}
bool DexFileVerifier::CheckClassDataItemField(uint32_t idx, uint32_t access_flags,
- bool expect_static) const {
+ bool expect_static) {
if (!CheckIndex(idx, header_->field_ids_size_, "class_data_item field_idx")) {
return false;
}
bool is_static = (access_flags & kAccStatic) != 0;
- if (is_static != expect_static) {
- LOG(ERROR) << "Static/instance field not in expected list";
+ if (UNLIKELY(is_static != expect_static)) {
+ ErrorStringPrintf("Static/instance field not in expected list");
return false;
}
uint32_t access_field_mask = kAccPublic | kAccPrivate | kAccProtected | kAccStatic |
kAccFinal | kAccVolatile | kAccTransient | kAccSynthetic | kAccEnum;
- if ((access_flags & ~access_field_mask) != 0) {
- LOG(ERROR) << StringPrintf("Bad class_data_item field access_flags %x", access_flags);
+ if (UNLIKELY((access_flags & ~access_field_mask) != 0)) {
+ ErrorStringPrintf("Bad class_data_item field access_flags %x", access_flags);
return false;
}
@@ -345,7 +348,7 @@
}
bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags,
- uint32_t code_offset, bool expect_direct) const {
+ uint32_t code_offset, bool expect_direct) {
if (!CheckIndex(idx, header_->method_ids_size_, "class_data_item method_idx")) {
return false;
}
@@ -355,26 +358,27 @@
bool is_synchronized = (access_flags & kAccSynchronized) != 0;
bool allow_synchronized = (access_flags & kAccNative) != 0;
- if (is_direct != expect_direct) {
- LOG(ERROR) << "Direct/virtual method not in expected list";
+ if (UNLIKELY(is_direct != expect_direct)) {
+ ErrorStringPrintf("Direct/virtual method not in expected list");
return false;
}
uint32_t access_method_mask = kAccPublic | kAccPrivate | kAccProtected | kAccStatic |
kAccFinal | kAccSynchronized | kAccBridge | kAccVarargs | kAccNative | kAccAbstract |
kAccStrict | kAccSynthetic | kAccConstructor | kAccDeclaredSynchronized;
- if (((access_flags & ~access_method_mask) != 0) || (is_synchronized && !allow_synchronized)) {
- LOG(ERROR) << StringPrintf("Bad class_data_item method access_flags %x", access_flags);
+ if (UNLIKELY(((access_flags & ~access_method_mask) != 0) ||
+ (is_synchronized && !allow_synchronized))) {
+ ErrorStringPrintf("Bad class_data_item method access_flags %x", access_flags);
return false;
}
- if (expect_code && code_offset == 0) {
- LOG(ERROR)<< StringPrintf("Unexpected zero value for class_data_item method code_off"
- " with access flags %x", access_flags);
+ if (UNLIKELY(expect_code && (code_offset == 0))) {
+ ErrorStringPrintf("Unexpected zero value for class_data_item method code_off with access "
+ "flags %x", access_flags);
return false;
- } else if (!expect_code && code_offset != 0) {
- LOG(ERROR) << StringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
- " with access flags %x", code_offset, access_flags);
+ } else if (UNLIKELY(!expect_code && (code_offset != 0))) {
+ ErrorStringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
+ " with access flags %x", code_offset, access_flags);
return false;
}
@@ -387,8 +391,8 @@
return false;
}
while (offset < aligned_offset) {
- if (*ptr_ != '\0') {
- LOG(ERROR) << StringPrintf("Non-zero padding %x before section start at %x", *ptr_, offset);
+ if (UNLIKELY(*ptr_ != '\0')) {
+ ErrorStringPrintf("Non-zero padding %x before section start at %x", *ptr_, offset);
return false;
}
ptr_++;
@@ -409,24 +413,24 @@
switch (value_type) {
case DexFile::kDexAnnotationByte:
- if (value_arg != 0) {
- LOG(ERROR) << StringPrintf("Bad encoded_value byte size %x", value_arg);
+ if (UNLIKELY(value_arg != 0)) {
+ ErrorStringPrintf("Bad encoded_value byte size %x", value_arg);
return false;
}
ptr_++;
break;
case DexFile::kDexAnnotationShort:
case DexFile::kDexAnnotationChar:
- if (value_arg > 1) {
- LOG(ERROR) << StringPrintf("Bad encoded_value char/short size %x", value_arg);
+ if (UNLIKELY(value_arg > 1)) {
+ ErrorStringPrintf("Bad encoded_value char/short size %x", value_arg);
return false;
}
ptr_ += value_arg + 1;
break;
case DexFile::kDexAnnotationInt:
case DexFile::kDexAnnotationFloat:
- if (value_arg > 3) {
- LOG(ERROR) << StringPrintf("Bad encoded_value int/float size %x", value_arg);
+ if (UNLIKELY(value_arg > 3)) {
+ ErrorStringPrintf("Bad encoded_value int/float size %x", value_arg);
return false;
}
ptr_ += value_arg + 1;
@@ -436,8 +440,8 @@
ptr_ += value_arg + 1;
break;
case DexFile::kDexAnnotationString: {
- if (value_arg > 3) {
- LOG(ERROR) << StringPrintf("Bad encoded_value string size %x", value_arg);
+ if (UNLIKELY(value_arg > 3)) {
+ ErrorStringPrintf("Bad encoded_value string size %x", value_arg);
return false;
}
uint32_t idx = ReadUnsignedLittleEndian(value_arg + 1);
@@ -447,8 +451,8 @@
break;
}
case DexFile::kDexAnnotationType: {
- if (value_arg > 3) {
- LOG(ERROR) << StringPrintf("Bad encoded_value type size %x", value_arg);
+ if (UNLIKELY(value_arg > 3)) {
+ ErrorStringPrintf("Bad encoded_value type size %x", value_arg);
return false;
}
uint32_t idx = ReadUnsignedLittleEndian(value_arg + 1);
@@ -459,8 +463,8 @@
}
case DexFile::kDexAnnotationField:
case DexFile::kDexAnnotationEnum: {
- if (value_arg > 3) {
- LOG(ERROR) << StringPrintf("Bad encoded_value field/enum size %x", value_arg);
+ if (UNLIKELY(value_arg > 3)) {
+ ErrorStringPrintf("Bad encoded_value field/enum size %x", value_arg);
return false;
}
uint32_t idx = ReadUnsignedLittleEndian(value_arg + 1);
@@ -470,8 +474,8 @@
break;
}
case DexFile::kDexAnnotationMethod: {
- if (value_arg > 3) {
- LOG(ERROR) << StringPrintf("Bad encoded_value method size %x", value_arg);
+ if (UNLIKELY(value_arg > 3)) {
+ ErrorStringPrintf("Bad encoded_value method size %x", value_arg);
return false;
}
uint32_t idx = ReadUnsignedLittleEndian(value_arg + 1);
@@ -481,8 +485,8 @@
break;
}
case DexFile::kDexAnnotationArray:
- if (value_arg != 0) {
- LOG(ERROR) << StringPrintf("Bad encoded_value array value_arg %x", value_arg);
+ if (UNLIKELY(value_arg != 0)) {
+ ErrorStringPrintf("Bad encoded_value array value_arg %x", value_arg);
return false;
}
if (!CheckEncodedArray()) {
@@ -490,8 +494,8 @@
}
break;
case DexFile::kDexAnnotationAnnotation:
- if (value_arg != 0) {
- LOG(ERROR) << StringPrintf("Bad encoded_value annotation value_arg %x", value_arg);
+ if (UNLIKELY(value_arg != 0)) {
+ ErrorStringPrintf("Bad encoded_value annotation value_arg %x", value_arg);
return false;
}
if (!CheckEncodedAnnotation()) {
@@ -499,19 +503,19 @@
}
break;
case DexFile::kDexAnnotationNull:
- if (value_arg != 0) {
- LOG(ERROR) << StringPrintf("Bad encoded_value null value_arg %x", value_arg);
+ if (UNLIKELY(value_arg != 0)) {
+ ErrorStringPrintf("Bad encoded_value null value_arg %x", value_arg);
return false;
}
break;
case DexFile::kDexAnnotationBoolean:
- if (value_arg > 1) {
- LOG(ERROR) << StringPrintf("Bad encoded_value boolean size %x", value_arg);
+ if (UNLIKELY(value_arg > 1)) {
+ ErrorStringPrintf("Bad encoded_value boolean size %x", value_arg);
return false;
}
break;
default:
- LOG(ERROR) << StringPrintf("Bogus encoded_value value_type %x", value_type);
+ ErrorStringPrintf("Bogus encoded_value value_type %x", value_type);
return false;
}
@@ -523,7 +527,7 @@
while (size--) {
if (!CheckEncodedValue()) {
- LOG(ERROR) << "Bad encoded_array value";
+ failure_reason_ = StringPrintf("Bad encoded_array value: %s", failure_reason_.c_str());
return false;
}
}
@@ -545,9 +549,9 @@
return false;
}
- if (last_idx >= idx && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order annotation_element name_idx: %x then %x",
- last_idx, idx);
+ if (UNLIKELY(last_idx >= idx && i != 0)) {
+ ErrorStringPrintf("Out-of-order annotation_element name_idx: %x then %x",
+ last_idx, idx);
return false;
}
@@ -596,21 +600,22 @@
return false;
}
- if (code_item->ins_size_ > code_item->registers_size_) {
- LOG(ERROR) << "ins_size (" << code_item->ins_size_ << ") > registers_size ("
- << code_item->registers_size_ << ")";
+ if (UNLIKELY(code_item->ins_size_ > code_item->registers_size_)) {
+ ErrorStringPrintf("ins_size (%ud) > registers_size (%ud)",
+ code_item->ins_size_, code_item->registers_size_);
return false;
}
- if ((code_item->outs_size_ > 5) && (code_item->outs_size_ > code_item->registers_size_)) {
+ if (UNLIKELY((code_item->outs_size_ > 5) &&
+ (code_item->outs_size_ > code_item->registers_size_))) {
/*
* outs_size can be up to 5, even if registers_size is smaller, since the
* short forms of method invocation allow repetitions of a register multiple
* times within a single parameter list. However, longer parameter lists
* need to be represented in-order in the register file.
*/
- LOG(ERROR) << "outs_size (" << code_item->outs_size_ << ") > registers_size ("
- << code_item->registers_size_ << ")";
+ ErrorStringPrintf("outs_size (%ud) > registers_size (%ud)",
+ code_item->outs_size_, code_item->registers_size_);
return false;
}
@@ -629,7 +634,7 @@
// try_items are 4-byte aligned. Verify the spacer is 0.
if ((((uint32_t) &insns[insns_size] & 3) != 0) && (insns[insns_size] != 0)) {
- LOG(ERROR) << StringPrintf("Non-zero padding: %x", insns[insns_size]);
+ ErrorStringPrintf("Non-zero padding: %x", insns[insns_size]);
return false;
}
@@ -641,8 +646,8 @@
return false;
}
- if ((handlers_size == 0) || (handlers_size >= 65536)) {
- LOG(ERROR) << "Invalid handlers_size: " << handlers_size;
+ if (UNLIKELY((handlers_size == 0) || (handlers_size >= 65536))) {
+ ErrorStringPrintf("Invalid handlers_size: %ud", handlers_size);
return false;
}
@@ -653,14 +658,13 @@
uint32_t last_addr = 0;
while (try_items_size--) {
- if (try_items->start_addr_ < last_addr) {
- LOG(ERROR) << StringPrintf("Out-of_order try_item with start_addr: %x",
- try_items->start_addr_);
+ if (UNLIKELY(try_items->start_addr_ < last_addr)) {
+ ErrorStringPrintf("Out-of_order try_item with start_addr: %x", try_items->start_addr_);
return false;
}
- if (try_items->start_addr_ >= insns_size) {
- LOG(ERROR) << StringPrintf("Invalid try_item start_addr: %x", try_items->start_addr_);
+ if (UNLIKELY(try_items->start_addr_ >= insns_size)) {
+ ErrorStringPrintf("Invalid try_item start_addr: %x", try_items->start_addr_);
return false;
}
@@ -671,14 +675,14 @@
}
}
- if (i == handlers_size) {
- LOG(ERROR) << StringPrintf("Bogus handler offset: %x", try_items->handler_off_);
+ if (UNLIKELY(i == handlers_size)) {
+ ErrorStringPrintf("Bogus handler offset: %x", try_items->handler_off_);
return false;
}
last_addr = try_items->start_addr_ + try_items->insn_count_;
- if (last_addr > insns_size) {
- LOG(ERROR) << StringPrintf("Invalid try_item insn_count: %x", try_items->insn_count_);
+ if (UNLIKELY(last_addr > insns_size)) {
+ ErrorStringPrintf("Invalid try_item insn_count: %x", try_items->insn_count_);
return false;
}
@@ -693,8 +697,8 @@
const byte* file_end = begin_ + size_;
for (uint32_t i = 0; i < size; i++) {
- if (ptr_ >= file_end) {
- LOG(ERROR) << "String data would go beyond end-of-file";
+ if (UNLIKELY(ptr_ >= file_end)) {
+ ErrorStringPrintf("String data would go beyond end-of-file");
return false;
}
@@ -704,8 +708,8 @@
switch (byte >> 4) {
case 0x00:
// Special case of bit pattern 0xxx.
- if (byte == 0) {
- LOG(ERROR) << StringPrintf("String data shorter than indicated utf16_size %x", size);
+ if (UNLIKELY(byte == 0)) {
+ ErrorStringPrintf("String data shorter than indicated utf16_size %x", size);
return false;
}
break;
@@ -725,19 +729,19 @@
case 0x0f:
// Illegal bit patterns 10xx or 1111.
// Note: 1111 is valid for normal UTF-8, but not here.
- LOG(ERROR) << StringPrintf("Illegal start byte %x in string data", byte);
+ ErrorStringPrintf("Illegal start byte %x in string data", byte);
return false;
case 0x0c:
case 0x0d: {
// Bit pattern 110x has an additional byte.
uint8_t byte2 = *(ptr_++);
- if ((byte2 & 0xc0) != 0x80) {
- LOG(ERROR) << StringPrintf("Illegal continuation byte %x in string data", byte2);
+ if (UNLIKELY((byte2 & 0xc0) != 0x80)) {
+ ErrorStringPrintf("Illegal continuation byte %x in string data", byte2);
return false;
}
uint16_t value = ((byte & 0x1f) << 6) | (byte2 & 0x3f);
- if ((value != 0) && (value < 0x80)) {
- LOG(ERROR) << StringPrintf("Illegal representation for value %x in string data", value);
+ if (UNLIKELY((value != 0) && (value < 0x80))) {
+ ErrorStringPrintf("Illegal representation for value %x in string data", value);
return false;
}
break;
@@ -745,18 +749,18 @@
case 0x0e: {
// Bit pattern 1110 has 2 additional bytes.
uint8_t byte2 = *(ptr_++);
- if ((byte2 & 0xc0) != 0x80) {
- LOG(ERROR) << StringPrintf("Illegal continuation byte %x in string data", byte2);
+ if (UNLIKELY((byte2 & 0xc0) != 0x80)) {
+ ErrorStringPrintf("Illegal continuation byte %x in string data", byte2);
return false;
}
uint8_t byte3 = *(ptr_++);
- if ((byte3 & 0xc0) != 0x80) {
- LOG(ERROR) << StringPrintf("Illegal continuation byte %x in string data", byte3);
+ if (UNLIKELY((byte3 & 0xc0) != 0x80)) {
+ ErrorStringPrintf("Illegal continuation byte %x in string data", byte3);
return false;
}
uint16_t value = ((byte & 0x0f) << 12) | ((byte2 & 0x3f) << 6) | (byte3 & 0x3f);
- if (value < 0x800) {
- LOG(ERROR) << StringPrintf("Illegal representation for value %x in string data", value);
+ if (UNLIKELY(value < 0x800)) {
+ ErrorStringPrintf("Illegal representation for value %x in string data", value);
return false;
}
break;
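
For reference, the bit arithmetic behind the two-byte case checked a little above (the three-byte case follows the same pattern with a 0x800 lower bound) can be pulled into a standalone helper. This is a hypothetical illustration only, assuming <cstdint>; it is not part of the patch:

    // Validates one Modified-UTF-8 two-byte sequence (110xxxxx 10xxxxxx).
    static bool IsValidMutf8TwoByteSequence(uint8_t byte1, uint8_t byte2) {
      if ((byte1 & 0xe0) != 0xc0 || (byte2 & 0xc0) != 0x80) {
        return false;  // wrong lead/continuation bit patterns
      }
      uint16_t value = ((byte1 & 0x1f) << 6) | (byte2 & 0x3f);
      // Shortest-form rule, with the Modified-UTF-8 exception that U+0000 is
      // encoded as 0xC0 0x80: only non-zero values below 0x80 are rejected.
      return (value == 0) || (value >= 0x80);
    }
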
@@ -764,8 +768,8 @@
}
}
- if (*(ptr_++) != '\0') {
- LOG(ERROR) << StringPrintf("String longer than indicated size %x", size);
+ if (UNLIKELY(*(ptr_++) != '\0')) {
+ ErrorStringPrintf("String longer than indicated size %x", size);
return false;
}
@@ -775,8 +779,8 @@
bool DexFileVerifier::CheckIntraDebugInfoItem() {
DecodeUnsignedLeb128(&ptr_);
uint32_t parameters_size = DecodeUnsignedLeb128(&ptr_);
- if (parameters_size > 65536) {
- LOG(ERROR) << StringPrintf("Invalid parameters_size: %x", parameters_size);
+ if (UNLIKELY(parameters_size > 65536)) {
+ ErrorStringPrintf("Invalid parameters_size: %x", parameters_size);
return false;
}
@@ -806,8 +810,8 @@
}
case DexFile::DBG_START_LOCAL: {
uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
- if (reg_num >= 65536) {
- LOG(ERROR) << StringPrintf("Bad reg_num for opcode %x", opcode);
+ if (UNLIKELY(reg_num >= 65536)) {
+ ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
}
uint32_t name_idx = DecodeUnsignedLeb128(&ptr_);
@@ -829,16 +833,16 @@
case DexFile::DBG_END_LOCAL:
case DexFile::DBG_RESTART_LOCAL: {
uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
- if (reg_num >= 65536) {
- LOG(ERROR) << StringPrintf("Bad reg_num for opcode %x", opcode);
+ if (UNLIKELY(reg_num >= 65536)) {
+ ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
}
break;
}
case DexFile::DBG_START_LOCAL_EXTENDED: {
uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
- if (reg_num >= 65536) {
- LOG(ERROR) << StringPrintf("Bad reg_num for opcode %x", opcode);
+ if (UNLIKELY(reg_num >= 65536)) {
+ ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
}
uint32_t name_idx = DecodeUnsignedLeb128(&ptr_);
@@ -890,7 +894,7 @@
case DexFile::kDexVisibilitySystem:
break;
default:
- LOG(ERROR) << StringPrintf("Bad annotation visibility: %x", *ptr_);
+ ErrorStringPrintf("Bad annotation visibility: %x", *ptr_);
return false;
}
@@ -918,8 +922,8 @@
uint32_t last_idx = 0;
for (uint32_t i = 0; i < field_count; i++) {
- if (last_idx >= field_item->field_idx_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order field_idx for annotation: %x then %x", last_idx, field_item->field_idx_);
+ if (UNLIKELY(last_idx >= field_item->field_idx_ && i != 0)) {
+ ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x", last_idx, field_item->field_idx_);
return false;
}
last_idx = field_item->field_idx_;
@@ -936,9 +940,9 @@
last_idx = 0;
for (uint32_t i = 0; i < method_count; i++) {
- if (last_idx >= method_item->method_idx_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
- last_idx, method_item->method_idx_);
+ if (UNLIKELY(last_idx >= method_item->method_idx_ && i != 0)) {
+ ErrorStringPrintf("Out-of-order method_idx for annotation: %x then %x",
+ last_idx, method_item->method_idx_);
return false;
}
last_idx = method_item->method_idx_;
@@ -950,15 +954,15 @@
reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
uint32_t parameter_count = item->parameters_size_;
if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem),
- "parameter_annotations list")) {
+ "parameter_annotations list")) {
return false;
}
last_idx = 0;
for (uint32_t i = 0; i < parameter_count; i++) {
- if (last_idx >= parameter_item->method_idx_ && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
- last_idx, parameter_item->method_idx_);
+ if (UNLIKELY(last_idx >= parameter_item->method_idx_ && i != 0)) {
+ ErrorStringPrintf("Out-of-order method_idx for annotation: %x then %x",
+ last_idx, parameter_item->method_idx_);
return false;
}
last_idx = parameter_item->method_idx_;
@@ -1059,7 +1063,7 @@
if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
!CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
- "annotation_set_ref_list size")) {
+ "annotation_set_ref_list size")) {
return false;
}
ptr_ = reinterpret_cast<const byte*>(item + count);
@@ -1121,7 +1125,7 @@
break;
}
default:
- LOG(ERROR) << StringPrintf("Unknown map item type %x", type);
+ ErrorStringPrintf("Unknown map item type %x", type);
return false;
}
@@ -1130,8 +1134,8 @@
}
aligned_offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
- if (aligned_offset > size_) {
- LOG(ERROR) << StringPrintf("Item %d at ends out of bounds", i);
+ if (UNLIKELY(aligned_offset > size_)) {
+ ErrorStringPrintf("Item %d at ends out of bounds", i);
return false;
}
@@ -1172,17 +1176,17 @@
expected_size = header_->class_defs_size_;
break;
default:
- LOG(ERROR) << StringPrintf("Bad type for id section: %x", type);
+ ErrorStringPrintf("Bad type for id section: %x", type);
return false;
}
// Check that the offset and size are what were expected from the header.
- if (offset != expected_offset) {
- LOG(ERROR) << StringPrintf("Bad offset for section: got %x, expected %x", offset, expected_offset);
+ if (UNLIKELY(offset != expected_offset)) {
+ ErrorStringPrintf("Bad offset for section: got %x, expected %x", offset, expected_offset);
return false;
}
- if (count != expected_size) {
- LOG(ERROR) << StringPrintf("Bad size for section: got %x, expected %x", count, expected_size);
+ if (UNLIKELY(count != expected_size)) {
+ ErrorStringPrintf("Bad size for section: got %x, expected %x", count, expected_size);
return false;
}
@@ -1194,8 +1198,8 @@
uint32_t data_end = data_start + header_->data_size_;
// Sanity check the offset of the section.
- if ((offset < data_start) || (offset > data_end)) {
- LOG(ERROR) << StringPrintf("Bad offset for data subsection: %x", offset);
+ if (UNLIKELY((offset < data_start) || (offset > data_end))) {
+ ErrorStringPrintf("Bad offset for data subsection: %x", offset);
return false;
}
@@ -1205,7 +1209,7 @@
uint32_t next_offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
if (next_offset > data_end) {
- LOG(ERROR) << StringPrintf("Out-of-bounds end of data subsection: %x", next_offset);
+ ErrorStringPrintf("Out-of-bounds end of data subsection: %x", next_offset);
return false;
}
@@ -1229,20 +1233,20 @@
// Check for padding and overlap between items.
if (!CheckPadding(offset, section_offset)) {
return false;
- } else if (offset > section_offset) {
- LOG(ERROR) << StringPrintf("Section overlap or out-of-order map: %x, %x", offset, section_offset);
+ } else if (UNLIKELY(offset > section_offset)) {
+ ErrorStringPrintf("Section overlap or out-of-order map: %x, %x", offset, section_offset);
return false;
}
// Check each item based on its type.
switch (type) {
case DexFile::kDexTypeHeaderItem:
- if (section_count != 1) {
- LOG(ERROR) << "Multiple header items";
+ if (UNLIKELY(section_count != 1)) {
+ ErrorStringPrintf("Multiple header items");
return false;
}
- if (section_offset != 0) {
- LOG(ERROR) << StringPrintf("Header at %x, not at start of file", section_offset);
+ if (UNLIKELY(section_offset != 0)) {
+ ErrorStringPrintf("Header at %x, not at start of file", section_offset);
return false;
}
ptr_ = begin_ + header_->header_size_;
@@ -1260,13 +1264,13 @@
offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
break;
case DexFile::kDexTypeMapList:
- if (section_count != 1) {
- LOG(ERROR) << "Multiple map list items";
+ if (UNLIKELY(section_count != 1)) {
+ ErrorStringPrintf("Multiple map list items");
return false;
}
- if (section_offset != header_->map_off_) {
- LOG(ERROR) << StringPrintf("Map not at header-defined offset: %x, expected %x",
- section_offset, header_->map_off_);
+ if (UNLIKELY(section_offset != header_->map_off_)) {
+ ErrorStringPrintf("Map not at header-defined offset: %x, expected %x",
+ section_offset, header_->map_off_);
return false;
}
ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
@@ -1288,7 +1292,7 @@
offset = reinterpret_cast<uint32_t>(ptr_) - reinterpret_cast<uint32_t>(begin_);
break;
default:
- LOG(ERROR) << StringPrintf("Unknown map item type %x", type);
+ ErrorStringPrintf("Unknown map item type %x", type);
return false;
}
@@ -1300,13 +1304,13 @@
bool DexFileVerifier::CheckOffsetToTypeMap(uint32_t offset, uint16_t type) {
auto it = offset_to_type_map_.find(offset);
- if (it == offset_to_type_map_.end()) {
- LOG(ERROR) << StringPrintf("No data map entry found @ %x; expected %x", offset, type);
+ if (UNLIKELY(it == offset_to_type_map_.end())) {
+ ErrorStringPrintf("No data map entry found @ %x; expected %x", offset, type);
return false;
}
- if (it->second != type) {
- LOG(ERROR) << StringPrintf("Unexpected data map entry @ %x; expected %x, found %x",
- offset, type, it->second);
+ if (UNLIKELY(it->second != type)) {
+ ErrorStringPrintf("Unexpected data map entry @ %x; expected %x, found %x",
+ offset, type, it->second);
return false;
}
return true;
@@ -1365,8 +1369,8 @@
const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
const char* prev_str = dex_file_->GetStringData(*prev_item);
const char* str = dex_file_->GetStringData(*item);
- if (CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(prev_str, str) >= 0) {
- LOG(ERROR) << StringPrintf("Out-of-order string_ids: '%s' then '%s'", prev_str, str);
+ if (UNLIKELY(CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(prev_str, str) >= 0)) {
+ ErrorStringPrintf("Out-of-order string_ids: '%s' then '%s'", prev_str, str);
return false;
}
}
@@ -1380,17 +1384,17 @@
const char* descriptor = dex_file_->StringDataByIdx(item->descriptor_idx_);
// Check that the descriptor is a valid type.
- if (!IsValidDescriptor(descriptor)) {
- LOG(ERROR) << StringPrintf("Invalid type descriptor: '%s'", descriptor);
+ if (UNLIKELY(!IsValidDescriptor(descriptor))) {
+ ErrorStringPrintf("Invalid type descriptor: '%s'", descriptor);
return false;
}
// Check ordering between items.
if (previous_item_ != NULL) {
const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
- if (prev_item->descriptor_idx_ >= item->descriptor_idx_) {
- LOG(ERROR) << StringPrintf("Out-of-order type_ids: %x then %x",
- prev_item->descriptor_idx_, item->descriptor_idx_);
+ if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
+ ErrorStringPrintf("Out-of-order type_ids: %x then %x",
+ prev_item->descriptor_idx_, item->descriptor_idx_);
return false;
}
}
@@ -1422,16 +1426,16 @@
it.Next();
shorty++;
}
- if (it.HasNext() || *shorty != '\0') {
- LOG(ERROR) << "Mismatched length for parameters and shorty";
+ if (UNLIKELY(it.HasNext() || *shorty != '\0')) {
+ ErrorStringPrintf("Mismatched length for parameters and shorty");
return false;
}
// Check ordering between items. This relies on type_ids being in order.
if (previous_item_ != NULL) {
const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
- if (prev->return_type_idx_ > item->return_type_idx_) {
- LOG(ERROR) << "Out-of-order proto_id return types";
+ if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
+ ErrorStringPrintf("Out-of-order proto_id return types");
return false;
} else if (prev->return_type_idx_ == item->return_type_idx_) {
DexFileParameterIterator curr_it(*dex_file_, *item);
@@ -1443,15 +1447,15 @@
if (prev_idx == DexFile::kDexNoIndex16) {
break;
}
- if (curr_idx == DexFile::kDexNoIndex16) {
- LOG(ERROR) << "Out-of-order proto_id arguments";
+ if (UNLIKELY(curr_idx == DexFile::kDexNoIndex16)) {
+ ErrorStringPrintf("Out-of-order proto_id arguments");
return false;
}
if (prev_idx < curr_idx) {
break;
- } else if (prev_idx > curr_idx) {
- LOG(ERROR) << "Out-of-order proto_id arguments";
+ } else if (UNLIKELY(prev_idx > curr_idx)) {
+ ErrorStringPrintf("Out-of-order proto_id arguments");
return false;
}
@@ -1470,38 +1474,38 @@
// Check that the class descriptor is valid.
const char* descriptor = dex_file_->StringByTypeIdx(item->class_idx_);
- if (!IsValidDescriptor(descriptor) || descriptor[0] != 'L') {
- LOG(ERROR) << "Invalid descriptor for class_idx: '" << descriptor << '"';
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || descriptor[0] != 'L')) {
+ ErrorStringPrintf("Invalid descriptor for class_idx: '%s'", descriptor);
return false;
}
// Check that the type descriptor is a valid field name.
descriptor = dex_file_->StringByTypeIdx(item->type_idx_);
- if (!IsValidDescriptor(descriptor) || descriptor[0] == 'V') {
- LOG(ERROR) << "Invalid descriptor for type_idx: '" << descriptor << '"';
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || descriptor[0] == 'V')) {
+ ErrorStringPrintf("Invalid descriptor for type_idx: '%s'", descriptor);
return false;
}
// Check that the name is valid.
descriptor = dex_file_->StringDataByIdx(item->name_idx_);
- if (!IsValidMemberName(descriptor)) {
- LOG(ERROR) << "Invalid field name: '" << descriptor << '"';
+ if (UNLIKELY(!IsValidMemberName(descriptor))) {
+ ErrorStringPrintf("Invalid field name: '%s'", descriptor);
return false;
}
// Check ordering between items. This relies on the other sections being in order.
if (previous_item_ != NULL) {
const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
- if (prev_item->class_idx_ > item->class_idx_) {
- LOG(ERROR) << "Out-of-order field_ids";
+ if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
+ ErrorStringPrintf("Out-of-order field_ids");
return false;
} else if (prev_item->class_idx_ == item->class_idx_) {
- if (prev_item->name_idx_ > item->name_idx_) {
- LOG(ERROR) << "Out-of-order field_ids";
+ if (UNLIKELY(prev_item->name_idx_ > item->name_idx_)) {
+ ErrorStringPrintf("Out-of-order field_ids");
return false;
} else if (prev_item->name_idx_ == item->name_idx_) {
- if (prev_item->type_idx_ >= item->type_idx_) {
- LOG(ERROR) << "Out-of-order field_ids";
+ if (UNLIKELY(prev_item->type_idx_ >= item->type_idx_)) {
+ ErrorStringPrintf("Out-of-order field_ids");
return false;
}
}
@@ -1517,31 +1521,31 @@
// Check that the class descriptor is a valid reference name.
const char* descriptor = dex_file_->StringByTypeIdx(item->class_idx_);
- if (!IsValidDescriptor(descriptor) || (descriptor[0] != 'L' && descriptor[0] != '[')) {
- LOG(ERROR) << "Invalid descriptor for class_idx: '" << descriptor << '"';
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || (descriptor[0] != 'L' && descriptor[0] != '['))) {
+ ErrorStringPrintf("Invalid descriptor for class_idx: '%s'", descriptor);
return false;
}
// Check that the name is valid.
descriptor = dex_file_->StringDataByIdx(item->name_idx_);
- if (!IsValidMemberName(descriptor)) {
- LOG(ERROR) << "Invalid method name: '" << descriptor << '"';
+ if (UNLIKELY(!IsValidMemberName(descriptor))) {
+ ErrorStringPrintf("Invalid method name: '%s'", descriptor);
return false;
}
// Check ordering between items. This relies on the other sections being in order.
if (previous_item_ != NULL) {
const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
- if (prev_item->class_idx_ > item->class_idx_) {
- LOG(ERROR) << "Out-of-order method_ids";
+ if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
+ ErrorStringPrintf("Out-of-order method_ids");
return false;
} else if (prev_item->class_idx_ == item->class_idx_) {
- if (prev_item->name_idx_ > item->name_idx_) {
- LOG(ERROR) << "Out-of-order method_ids";
+ if (UNLIKELY(prev_item->name_idx_ > item->name_idx_)) {
+ ErrorStringPrintf("Out-of-order method_ids");
return false;
} else if (prev_item->name_idx_ == item->name_idx_) {
- if (prev_item->proto_idx_ >= item->proto_idx_) {
- LOG(ERROR) << "Out-of-order method_ids";
+ if (UNLIKELY(prev_item->proto_idx_ >= item->proto_idx_)) {
+ ErrorStringPrintf("Out-of-order method_ids");
return false;
}
}
@@ -1557,8 +1561,8 @@
uint32_t class_idx = item->class_idx_;
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- if (!IsValidDescriptor(descriptor) || descriptor[0] != 'L') {
- LOG(ERROR) << "Invalid class descriptor: '" << descriptor << "'";
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || descriptor[0] != 'L')) {
+ ErrorStringPrintf("Invalid class descriptor: '%s'", descriptor);
return false;
}
@@ -1581,8 +1585,8 @@
if (item->superclass_idx_ != DexFile::kDexNoIndex16) {
descriptor = dex_file_->StringByTypeIdx(item->superclass_idx_);
- if (!IsValidDescriptor(descriptor) || descriptor[0] != 'L') {
- LOG(ERROR) << "Invalid superclass: '" << descriptor << "'";
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || descriptor[0] != 'L')) {
+ ErrorStringPrintf("Invalid superclass: '%s'", descriptor);
return false;
}
}
@@ -1594,8 +1598,8 @@
// Ensure that all interfaces refer to classes (not arrays or primitives).
for (uint32_t i = 0; i < size; i++) {
descriptor = dex_file_->StringByTypeIdx(interfaces->GetTypeItem(i).type_idx_);
- if (!IsValidDescriptor(descriptor) || descriptor[0] != 'L') {
- LOG(ERROR) << "Invalid interface: '" << descriptor << "'";
+ if (UNLIKELY(!IsValidDescriptor(descriptor) || descriptor[0] != 'L')) {
+ ErrorStringPrintf("Invalid interface: '%s'", descriptor);
return false;
}
}
@@ -1608,8 +1612,8 @@
uint32_t idx1 = interfaces->GetTypeItem(i).type_idx_;
for (uint32_t j = 0; j < i; j++) {
uint32_t idx2 = interfaces->GetTypeItem(j).type_idx_;
- if (idx1 == idx2) {
- LOG(ERROR) << "Duplicate interface: '" << dex_file_->StringByTypeIdx(idx1) << "'";
+ if (UNLIKELY(idx1 == idx2)) {
+ ErrorStringPrintf("Duplicate interface: '%s'", dex_file_->StringByTypeIdx(idx1));
return false;
}
}
@@ -1620,8 +1624,8 @@
if (item->class_data_off_ != 0) {
const byte* data = begin_ + item->class_data_off_;
uint16_t data_definer = FindFirstClassDataDefiner(data);
- if ((data_definer != item->class_idx_) && (data_definer != DexFile::kDexNoIndex16)) {
- LOG(ERROR) << "Invalid class_data_item";
+ if (UNLIKELY((data_definer != item->class_idx_) && (data_definer != DexFile::kDexNoIndex16))) {
+ ErrorStringPrintf("Invalid class_data_item");
return false;
}
}
@@ -1630,8 +1634,9 @@
if (item->annotations_off_ != 0) {
const byte* data = begin_ + item->annotations_off_;
uint16_t annotations_definer = FindFirstAnnotationsDirectoryDefiner(data);
- if ((annotations_definer != item->class_idx_) && (annotations_definer != DexFile::kDexNoIndex16)) {
- LOG(ERROR) << "Invalid annotations_directory_item";
+ if (UNLIKELY((annotations_definer != item->class_idx_) &&
+ (annotations_definer != DexFile::kDexNoIndex16))) {
+ ErrorStringPrintf("Invalid annotations_directory_item");
return false;
}
}
@@ -1675,8 +1680,8 @@
const uint8_t* data = annotation->annotation_;
uint32_t idx = DecodeUnsignedLeb128(&data);
- if (last_idx >= idx && i != 0) {
- LOG(ERROR) << StringPrintf("Out-of-order entry types: %x then %x", last_idx, idx);
+ if (UNLIKELY(last_idx >= idx && i != 0)) {
+ ErrorStringPrintf("Out-of-order entry types: %x then %x", last_idx, idx);
return false;
}
@@ -1694,8 +1699,8 @@
for (; it.HasNextStaticField() || it.HasNextInstanceField(); it.Next()) {
const DexFile::FieldId& field = dex_file_->GetFieldId(it.GetMemberIndex());
- if (field.class_idx_ != defining_class) {
- LOG(ERROR) << "Mismatched defining class for class_data_item field";
+ if (UNLIKELY(field.class_idx_ != defining_class)) {
+ ErrorStringPrintf("Mismatched defining class for class_data_item field");
return false;
}
}
@@ -1705,8 +1710,8 @@
return false;
}
const DexFile::MethodId& method = dex_file_->GetMethodId(it.GetMemberIndex());
- if (method.class_idx_ != defining_class) {
- LOG(ERROR) << "Mismatched defining class for class_data_item method";
+ if (UNLIKELY(method.class_idx_ != defining_class)) {
+ ErrorStringPrintf("Mismatched defining class for class_data_item method");
return false;
}
}
@@ -1731,8 +1736,8 @@
uint32_t field_count = item->fields_size_;
for (uint32_t i = 0; i < field_count; i++) {
const DexFile::FieldId& field = dex_file_->GetFieldId(field_item->field_idx_);
- if (field.class_idx_ != defining_class) {
- LOG(ERROR) << "Mismatched defining class for field_annotation";
+ if (UNLIKELY(field.class_idx_ != defining_class)) {
+ ErrorStringPrintf("Mismatched defining class for field_annotation");
return false;
}
if (!CheckOffsetToTypeMap(field_item->annotations_off_, DexFile::kDexTypeAnnotationSetItem)) {
@@ -1747,8 +1752,8 @@
uint32_t method_count = item->methods_size_;
for (uint32_t i = 0; i < method_count; i++) {
const DexFile::MethodId& method = dex_file_->GetMethodId(method_item->method_idx_);
- if (method.class_idx_ != defining_class) {
- LOG(ERROR) << "Mismatched defining class for method_annotation";
+ if (UNLIKELY(method.class_idx_ != defining_class)) {
+ ErrorStringPrintf("Mismatched defining class for method_annotation");
return false;
}
if (!CheckOffsetToTypeMap(method_item->annotations_off_, DexFile::kDexTypeAnnotationSetItem)) {
@@ -1763,8 +1768,8 @@
uint32_t parameter_count = item->parameters_size_;
for (uint32_t i = 0; i < parameter_count; i++) {
const DexFile::MethodId& parameter_method = dex_file_->GetMethodId(parameter_item->method_idx_);
- if (parameter_method.class_idx_ != defining_class) {
- LOG(ERROR) << "Mismatched defining class for parameter_annotation";
+ if (UNLIKELY(parameter_method.class_idx_ != defining_class)) {
+ ErrorStringPrintf("Mismatched defining class for parameter_annotation");
return false;
}
if (!CheckOffsetToTypeMap(parameter_item->annotations_off_,
@@ -1860,7 +1865,7 @@
break;
}
default:
- LOG(ERROR) << StringPrintf("Unknown map item type %x", type);
+ ErrorStringPrintf("Unknown map item type %x", type);
return false;
}
@@ -1908,7 +1913,7 @@
break;
}
default:
- LOG(ERROR) << StringPrintf("Unknown map item type %x", type);
+ ErrorStringPrintf("Unknown map item type %x", type);
return false;
}
@@ -1942,4 +1947,13 @@
return true;
}
+void DexFileVerifier::ErrorStringPrintf(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ DCHECK(failure_reason_.empty()) << failure_reason_;
+ failure_reason_ = StringPrintf("Failure to verify dex file '%s': ", location_);
+ StringAppendV(&failure_reason_, fmt, ap);
+ va_end(ap);
+}
+
} // namespace art
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 3797dc7..4b8b80a 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -24,29 +24,35 @@
class DexFileVerifier {
public:
- static bool Verify(const DexFile* dex_file, const byte* begin, size_t size);
+ static bool Verify(const DexFile* dex_file, const byte* begin, size_t size,
+ const char* location, std::string* error_msg);
+
+ const std::string& FailureReason() const {
+ return failure_reason_;
+ }
private:
- DexFileVerifier(const DexFile* dex_file, const byte* begin, size_t size)
- : dex_file_(dex_file), begin_(begin), size_(size),
+ DexFileVerifier(const DexFile* dex_file, const byte* begin, size_t size, const char* location)
+ : dex_file_(dex_file), begin_(begin), size_(size), location_(location),
header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL) {
}
bool Verify();
- bool CheckPointerRange(const void* start, const void* end, const char* label) const;
- bool CheckListSize(const void* start, uint32_t count, uint32_t element_size, const char* label) const;
- bool CheckIndex(uint32_t field, uint32_t limit, const char* label) const;
+ bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
+ bool CheckPointerRange(const void* start, const void* end, const char* label);
+ bool CheckListSize(const void* start, uint32_t count, uint32_t element_size, const char* label);
+ bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
- bool CheckHeader() const;
- bool CheckMap() const;
+ bool CheckHeader();
+ bool CheckMap();
uint32_t ReadUnsignedLittleEndian(uint32_t size);
bool CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
- uint32_t* handler_offsets, uint32_t handlers_size);
- bool CheckClassDataItemField(uint32_t idx, uint32_t access_flags, bool expect_static) const;
+ uint32_t* handler_offsets, uint32_t handlers_size);
+ bool CheckClassDataItemField(uint32_t idx, uint32_t access_flags, bool expect_static);
bool CheckClassDataItemMethod(uint32_t idx, uint32_t access_flags, uint32_t code_offset,
- bool expect_direct) const;
+ bool expect_direct);
bool CheckPadding(uint32_t offset, uint32_t aligned_offset);
bool CheckEncodedValue();
bool CheckEncodedArray();
@@ -82,14 +88,20 @@
bool CheckInterSectionIterate(uint32_t offset, uint32_t count, uint16_t type);
bool CheckInterSection();
- const DexFile* dex_file_;
- const byte* begin_;
- size_t size_;
- const DexFile::Header* header_;
+ void ErrorStringPrintf(const char* fmt, ...)
+ __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
+
+ const DexFile* const dex_file_;
+ const byte* const begin_;
+ const size_t size_;
+ const char* const location_;
+ const DexFile::Header* const header_;
SafeMap<uint32_t, uint16_t> offset_to_type_map_;
const byte* ptr_;
const void* previous_item_;
+
+ std::string failure_reason_;
};
} // namespace art
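
Aside: the new ErrorStringPrintf/FailureReason pair replaces immediate LOG(ERROR) calls with a single recorded failure reason that the caller reports. The standalone sketch below uses made-up names (FormatError, ToyVerifier) rather than ART code, but shows the same pattern: printf-style formatting with compile-time format checking, stored for later use. In the verifier declaration above the attribute indices are (2, 3) because a member function's implicit `this` counts as argument 1.

#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <iostream>
#include <string>

// Format a printf-style message into a std::string, with the same kind of
// compile-time format checking the verifier declaration requests.
std::string FormatError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)));

std::string FormatError(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  char buf[512];
  vsnprintf(buf, sizeof(buf), fmt, ap);
  va_end(ap);
  return buf;
}

// Illustrative verifier: checks return false and record one failure reason
// instead of logging, leaving the caller free to log, throw, or ignore it.
class ToyVerifier {
 public:
  bool CheckAlignment(size_t offset, size_t alignment) {
    if ((offset & (alignment - 1)) != 0) {
      failure_reason_ = "Failure to verify: " +
          FormatError("misaligned offset %zu (expected %zu-byte alignment)", offset, alignment);
      return false;
    }
    return true;
  }

  const std::string& FailureReason() const { return failure_reason_; }

 private:
  std::string failure_reason_;
};

int main() {
  ToyVerifier verifier;
  if (!verifier.CheckAlignment(13, 4)) {
    std::cerr << verifier.FailureReason() << std::endl;
  }
  return 0;
}
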
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index 64c645e..9961df9 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -20,16 +20,27 @@
namespace art {
-class DexMethodIteratorTest : public CommonTest {};
+class DexMethodIteratorTest : public CommonTest {
+ public:
+ const DexFile* OpenDexFile(const std::string& partial_filename) {
+ std::string dfn = GetDexFileName(partial_filename);
+ std::string error_msg;
+ const DexFile* dexfile = DexFile::Open(dfn.c_str(), dfn.c_str(), &error_msg);
+ if (dexfile == nullptr) {
+ LG << "Failed to open '" << dfn << "': " << error_msg;
+ }
+ return dexfile;
+ }
+};
TEST_F(DexMethodIteratorTest, Basic) {
ScopedObjectAccess soa(Thread::Current());
std::vector<const DexFile*> dex_files;
- dex_files.push_back(DexFile::Open(GetDexFileName("core"), GetDexFileName("core")));
- dex_files.push_back(DexFile::Open(GetDexFileName("conscrypt"), GetDexFileName("conscrypt")));
- dex_files.push_back(DexFile::Open(GetDexFileName("okhttp"), GetDexFileName("okhttp")));
- dex_files.push_back(DexFile::Open(GetDexFileName("core-junit"), GetDexFileName("core-junit")));
- dex_files.push_back(DexFile::Open(GetDexFileName("bouncycastle"), GetDexFileName("bouncycastle")));
+ dex_files.push_back(OpenDexFile("core"));
+ dex_files.push_back(OpenDexFile("conscrypt"));
+ dex_files.push_back(OpenDexFile("okhttp"));
+ dex_files.push_back(OpenDexFile("core-junit"));
+ dex_files.push_back(OpenDexFile("bouncycastle"));
DexMethodIterator it(dex_files);
while (it.HasNext()) {
const DexFile& dex_file = it.GetDexFile();
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index dfb6819..15c95f2 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -40,15 +40,16 @@
symtab_symbol_table_(NULL),
dynsym_symbol_table_(NULL) {}
-ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only) {
+ElfFile* ElfFile::Open(File* file, bool writable, bool program_header_only,
+ std::string* error_msg) {
UniquePtr<ElfFile> elf_file(new ElfFile());
- if (!elf_file->Setup(file, writable, program_header_only)) {
- return NULL;
+ if (!elf_file->Setup(file, writable, program_header_only, error_msg)) {
+ return nullptr;
}
return elf_file.release();
}
-bool ElfFile::Setup(File* file, bool writable, bool program_header_only) {
+bool ElfFile::Setup(File* file, bool writable, bool program_header_only, std::string* error_msg) {
CHECK(file != NULL);
file_ = file;
writable_ = writable;
@@ -66,40 +67,42 @@
int64_t file_length = file_->GetLength();
if (file_length < 0) {
errno = -file_length;
- PLOG(WARNING) << "Failed to get length of file: " << file_->GetPath() << " fd=" << file_->Fd();
+ *error_msg = StringPrintf("Failed to get length of file: '%s' fd=%d: %s",
+ file_->GetPath().c_str(), file_->Fd(), strerror(errno));
return false;
}
if (file_length < sizeof(llvm::ELF::Elf32_Ehdr)) {
- if (writable) {
- LOG(WARNING) << "File size of " << file_length
- << " bytes not large enough to contain ELF header of "
- << sizeof(llvm::ELF::Elf32_Ehdr) << " bytes: " << file_->GetPath();
- }
+ *error_msg = StringPrintf("File size of %lld bytes not large enough to contain ELF header of "
+ "%zd bytes: '%s'", file_length, sizeof(llvm::ELF::Elf32_Ehdr),
+ file_->GetPath().c_str());
return false;
}
if (program_header_only) {
// first just map ELF header to get program header size information
size_t elf_header_size = sizeof(llvm::ELF::Elf32_Ehdr);
- if (!SetMap(MemMap::MapFile(elf_header_size, prot, flags, file_->Fd(), 0))) {
+ if (!SetMap(MemMap::MapFile(elf_header_size, prot, flags, file_->Fd(), 0,
+ file_->GetPath().c_str(), error_msg))) {
return false;
}
// then remap to cover program header
size_t program_header_size = header_->e_phoff + (header_->e_phentsize * header_->e_phnum);
if (file_length < program_header_size) {
- LOG(WARNING) << "File size of " << file_length
- << " bytes not large enough to contain ELF program header of "
- << program_header_size << " bytes: " << file_->GetPath();
+ *error_msg = StringPrintf("File size of %lld bytes not large enough to contain ELF program "
+ "header of %zd bytes: '%s'", file_length,
+ sizeof(llvm::ELF::Elf32_Ehdr), file_->GetPath().c_str());
return false;
}
- if (!SetMap(MemMap::MapFile(program_header_size, prot, flags, file_->Fd(), 0))) {
- LOG(WARNING) << "Failed to map ELF program headers: " << file_->GetPath();
+ if (!SetMap(MemMap::MapFile(program_header_size, prot, flags, file_->Fd(), 0,
+ file_->GetPath().c_str(), error_msg))) {
+ *error_msg = StringPrintf("Failed to map ELF program headers: %s", error_msg->c_str());
return false;
}
} else {
// otherwise map entire file
- if (!SetMap(MemMap::MapFile(file_->GetLength(), prot, flags, file_->Fd(), 0))) {
- LOG(WARNING) << "Failed to map ELF file: " << file_->GetPath();
+ if (!SetMap(MemMap::MapFile(file_->GetLength(), prot, flags, file_->Fd(), 0,
+ file_->GetPath().c_str(), error_msg))) {
+ *error_msg = StringPrintf("Failed to map ELF file: %s", error_msg->c_str());
return false;
}
}
@@ -114,7 +117,8 @@
// Find .dynamic section info from program header
dynamic_program_header_ = FindProgamHeaderByType(llvm::ELF::PT_DYNAMIC);
if (dynamic_program_header_ == NULL) {
- LOG(WARNING) << "Failed to find PT_DYNAMIC program header in ELF file: " << file_->GetPath();
+ *error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
+ file_->GetPath().c_str());
return false;
}
@@ -596,7 +600,7 @@
return loaded_size;
}
-bool ElfFile::Load(bool executable) {
+bool ElfFile::Load(bool executable, std::string* error_msg) {
// TODO: actually return false on error instead of aborting.
CHECK(program_header_only_) << file_->GetPath();
for (llvm::ELF::Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
@@ -628,9 +632,10 @@
if (program_header.p_vaddr == 0) {
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
+ std::string error_msg;
UniquePtr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- NULL, GetLoadedSize(), PROT_NONE));
- CHECK(reserve.get() != NULL) << file_->GetPath();
+ NULL, GetLoadedSize(), PROT_NONE, &error_msg));
+ CHECK(reserve.get() != NULL) << file_->GetPath() << ": " << error_msg;
base_address_ = reserve->Begin();
segments_.push_back(reserve.release());
}
@@ -657,18 +662,20 @@
flags |= MAP_PRIVATE;
}
if (file_length < (program_header.p_offset + program_header.p_memsz)) {
- LOG(WARNING) << "File size of " << file_length
- << " bytes not large enough to contain ELF segment " << i
- << " of " << (program_header.p_offset + program_header.p_memsz)
- << " bytes: " << file_->GetPath();
+ *error_msg = StringPrintf("File size of %lld bytes not large enough to contain ELF segment "
+ "%d of %d bytes: '%s'", file_length, i,
+ program_header.p_offset + program_header.p_memsz,
+ file_->GetPath().c_str());
return false;
}
UniquePtr<MemMap> segment(MemMap::MapFileAtAddress(p_vaddr,
program_header.p_memsz,
prot, flags, file_->Fd(),
program_header.p_offset,
- true));
- CHECK(segment.get() != NULL) << file_->GetPath();
+ true,
+ file_->GetPath().c_str(),
+ error_msg));
+ CHECK(segment.get() != nullptr) << *error_msg;
CHECK_EQ(segment->Begin(), p_vaddr) << file_->GetPath();
segments_.push_back(segment.release());
}
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 33b5fc3..b025137 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -35,7 +35,7 @@
// ELFObjectFile.
class ElfFile {
public:
- static ElfFile* Open(File* file, bool writable, bool program_header_only);
+ static ElfFile* Open(File* file, bool writable, bool program_header_only, std::string* error_msg);
~ElfFile();
// Load segments into memory based on PT_LOAD program headers
@@ -115,12 +115,12 @@
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
- bool Load(bool executable);
+ bool Load(bool executable, std::string* error_msg);
private:
ElfFile();
- bool Setup(File* file, bool writable, bool program_header_only);
+ bool Setup(File* file, bool writable, bool program_header_only, std::string* error_msg);
bool SetMap(MemMap* map);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 12291c3..01d3549 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -19,6 +19,7 @@
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "invoke_arg_array_builder.h"
#include "mirror/art_method-inl.h"
@@ -547,6 +548,21 @@
} else if (invoke_type == kInterface) {
called = receiver->GetClass()->FindVirtualMethodForInterface(called);
}
+ if ((invoke_type == kVirtual) || (invoke_type == kInterface)) {
+ // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
+ // of the sharpened method.
+ if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
+ caller->GetDexCacheResolvedMethods()->Set(called->GetDexMethodIndex(), called);
+ } else {
+ // Calling from one dex file to another, need to compute the method index appropriate to
+ // the caller's dex file.
+ uint32_t method_index =
+ MethodHelper(called).FindDexMethodIndexInOtherDexFile(MethodHelper(caller).GetDexFile());
+ if (method_index != DexFile::kDexNoIndex) {
+ caller->GetDexCacheResolvedMethods()->Set(method_index, called);
+ }
+ }
+ }
// Ensure that the called method's class is initialized.
mirror::Class* called_class = called->GetDeclaringClass();
linker->EnsureInitialized(called_class, true, true);
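
Aside: the hunk above depends on the fact that each dex file numbers its method references independently, so the sharpened callee must be re-indexed before it can be cached under the caller's dex file. The sketch below uses plain standard-library types (none of ART's DexFile or dex cache classes) to illustrate why that translation step exists.

#include <cstddef>
#include <map>
#include <string>
#include <tuple>
#include <vector>

// A method reference identified by class + name + signature; illustrative only.
struct MethodRef {
  std::string class_descriptor;
  std::string name;
  std::string signature;
  bool operator<(const MethodRef& o) const {
    return std::tie(class_descriptor, name, signature) <
           std::tie(o.class_descriptor, o.name, o.signature);
  }
};

// A "dex file" whose method index is simply the position in its own table.
struct ToyDexFile {
  std::vector<MethodRef> method_ids;

  // Analogue of finding the method index appropriate to another file: return
  // the local index of an equivalent method_id, or -1 if this file has none.
  int FindMethodIndex(const MethodRef& ref) const {
    for (size_t i = 0; i < method_ids.size(); ++i) {
      if (!(method_ids[i] < ref) && !(ref < method_ids[i])) {
        return static_cast<int>(i);
      }
    }
    return -1;
  }
};

int main() {
  MethodRef target{"Ljava/lang/Object;", "hashCode", "()I"};
  ToyDexFile callee_file{{target}};                           // index 0 in the callee's file
  ToyDexFile caller_file{{{"LFoo;", "bar", "()V"}, target}};  // index 1 in the caller's file

  // A resolved-methods cache is keyed by the *caller's* index, hence the
  // translation before updating it.
  std::map<int, const MethodRef*> caller_resolved_methods;
  int caller_index = caller_file.FindMethodIndex(target);
  if (caller_index >= 0) {
    caller_resolved_methods[caller_index] = &callee_file.method_ids[0];
  }
  return caller_index == 1 ? 0 : 1;
}
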
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 2e6b0a8..a5f9997 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -146,7 +146,7 @@
ScopedObjectAccess soa(env);
std::vector<uintptr_t> fake_stack;
- ASSERT_EQ(kStackAlignment, 16);
+ ASSERT_EQ(kStackAlignment, 16U);
ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
#if !defined(ART_USE_PORTABLE_COMPILER)
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 997d725..8fa5b86 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -163,8 +163,10 @@
// Size in number of elements.
void Init() {
- mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T), PROT_READ | PROT_WRITE));
- CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack";
+ std::string error_msg;
+ mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
+ PROT_READ | PROT_WRITE, &error_msg));
+ CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
byte* addr = mem_map_->Begin();
CHECK(addr != NULL);
debug_is_sorted_ = true;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 85034a0..7818bc8 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -54,9 +54,11 @@
/* Set up the card table */
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
+ std::string error_msg;
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
- capacity + 256, PROT_READ | PROT_WRITE));
- CHECK(mem_map.get() != NULL) << "couldn't allocate card table";
+ capacity + 256, PROT_READ | PROT_WRITE,
+ &error_msg));
+ CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, so
// we don't clear the card table, which avoids unnecessary pages being allocated.
COMPILE_ASSERT(kCardClean == 0, card_clean_must_be_0);
diff --git a/runtime/gc/accounting/gc_allocator.cc b/runtime/gc/accounting/gc_allocator.cc
index 11d0e67..49d84fa 100644
--- a/runtime/gc/accounting/gc_allocator.cc
+++ b/runtime/gc/accounting/gc_allocator.cc
@@ -22,15 +22,17 @@
namespace art {
namespace gc {
namespace accounting {
- void* RegisterGCAllocation(size_t bytes) {
- Runtime::Current()->GetHeap()->RegisterGCAllocation(bytes);
- return malloc(bytes);
- }
- void RegisterGCDeAllocation(void* p, size_t bytes) {
- Runtime::Current()->GetHeap()->RegisterGCDeAllocation(bytes);
- free(p);
- }
+void* RegisterGcAllocation(size_t bytes) {
+ Runtime::Current()->GetHeap()->RegisterGCAllocation(bytes);
+ return malloc(bytes);
+}
+
+void RegisterGcDeallocation(void* p, size_t bytes) {
+ Runtime::Current()->GetHeap()->RegisterGCDeAllocation(bytes);
+ free(p);
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/gc_allocator.h b/runtime/gc/accounting/gc_allocator.h
index 1fba858..4fe9367 100644
--- a/runtime/gc/accounting/gc_allocator.h
+++ b/runtime/gc/accounting/gc_allocator.h
@@ -26,55 +26,56 @@
namespace art {
namespace gc {
namespace accounting {
- void* RegisterGCAllocation(size_t bytes);
- void RegisterGCDeAllocation(void* p, size_t bytes);
- static const bool kMeasureGCMemoryOverhead = false;
+void* RegisterGcAllocation(size_t bytes);
+void RegisterGcDeallocation(void* p, size_t bytes);
- template <typename T>
- class GCAllocatorImpl : public std::allocator<T> {
- public:
- typedef typename std::allocator<T>::value_type value_type;
- typedef typename std::allocator<T>::size_type size_type;
- typedef typename std::allocator<T>::difference_type difference_type;
- typedef typename std::allocator<T>::pointer pointer;
- typedef typename std::allocator<T>::const_pointer const_pointer;
- typedef typename std::allocator<T>::reference reference;
- typedef typename std::allocator<T>::const_reference const_reference;
+static const bool kMeasureGcMemoryOverhead = false;
- // Used internally by STL data structures.
- template <class U>
- GCAllocatorImpl(const GCAllocatorImpl<U>& alloc) throw() {
- }
+template <typename T>
+class GcAllocatorImpl : public std::allocator<T> {
+ public:
+ typedef typename std::allocator<T>::value_type value_type;
+ typedef typename std::allocator<T>::size_type size_type;
+ typedef typename std::allocator<T>::difference_type difference_type;
+ typedef typename std::allocator<T>::pointer pointer;
+ typedef typename std::allocator<T>::const_pointer const_pointer;
+ typedef typename std::allocator<T>::reference reference;
+ typedef typename std::allocator<T>::const_reference const_reference;
- // Used internally by STL data structures.
- GCAllocatorImpl() throw() {
- }
+ // Used internally by STL data structures.
+ template <class U>
+ GcAllocatorImpl(const GcAllocatorImpl<U>& alloc) throw() {
+ }
- // Enables an allocator for objects of one type to allocate storage for objects of another type.
- // Used internally by STL data structures.
- template <class U>
- struct rebind {
- typedef GCAllocatorImpl<U> other;
- };
+ // Used internally by STL data structures.
+ GcAllocatorImpl() throw() {
+ }
- pointer allocate(size_type n, const_pointer hint = 0) {
- return reinterpret_cast<pointer>(RegisterGCAllocation(n * sizeof(T)));
- }
-
- template <typename PT>
- void deallocate(PT p, size_type n) {
- RegisterGCDeAllocation(p, n * sizeof(T));
- }
+ // Enables an allocator for objects of one type to allocate storage for objects of another type.
+ // Used internally by STL data structures.
+ template <class U>
+ struct rebind {
+ typedef GcAllocatorImpl<U> other;
};
- // C++ doesn't allow template typedefs. This is a workaround template typedef which is
- // GCAllocatorImpl<T> if kMeasureGCMemoryOverhead is true, std::allocator<T> otherwise.
- template <typename T>
- class GCAllocator : public TypeStaticIf<kMeasureGCMemoryOverhead,
- GCAllocatorImpl<T>,
- std::allocator<T> >::value {
- };
+ pointer allocate(size_type n, const_pointer hint = 0) {
+ return reinterpret_cast<pointer>(RegisterGcAllocation(n * sizeof(T)));
+ }
+
+ template <typename PT>
+ void deallocate(PT p, size_type n) {
+ RegisterGcDeallocation(p, n * sizeof(T));
+ }
+};
+
+// C++ doesn't allow template typedefs. This is a workaround template typedef which is
+// GcAllocatorImpl<T> if kMeasureGcMemoryOverhead is true, std::allocator<T> otherwise.
+template <typename T>
+class GcAllocator : public TypeStaticIf<kMeasureGcMemoryOverhead, GcAllocatorImpl<T>,
+ std::allocator<T> >::value {
+};
+
} // namespace accounting
} // namespace gc
} // namespace art
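
Aside: GcAllocator above emulates a template typedef by inheriting from a TypeStaticIf selection. A minimal self-contained sketch of that idiom, with a local TypeStaticIf stand-in and made-up allocator names (ART defines its own helper elsewhere), might look like this:

#include <memory>
#include <vector>

// Compile-time type selection: A when the flag is true, B otherwise.
template <bool kCondition, typename A, typename B>
struct TypeStaticIf {
  typedef A value;
};

template <typename A, typename B>
struct TypeStaticIf<false, A, B> {
  typedef B value;
};

static const bool kMeasureOverhead = false;

// Stand-in for an instrumented allocator such as GcAllocatorImpl<T>.
template <typename T>
class CountingAllocator : public std::allocator<T> {};

// "Template typedef" workaround: inherit from whichever allocator was selected.
template <typename T>
class MaybeCountingAllocator
    : public TypeStaticIf<kMeasureOverhead, CountingAllocator<T>, std::allocator<T> >::value {};

int main() {
  // With measurement disabled this is effectively std::allocator<int>; flipping
  // the flag swaps in the instrumented allocator without touching call sites.
  std::vector<int, MaybeCountingAllocator<int> > values;
  values.push_back(42);
  return values.size() == 1 ? 0 : 1;
}
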
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 2ca8c4a..24ebbaa 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -31,8 +31,8 @@
class HeapBitmap {
public:
- typedef std::vector<SpaceBitmap*, GCAllocator<SpaceBitmap*> > SpaceBitmapVector;
- typedef std::vector<SpaceSetMap*, GCAllocator<SpaceSetMap*> > SpaceSetMapVector;
+ typedef std::vector<SpaceBitmap*, GcAllocator<SpaceBitmap*> > SpaceBitmapVector;
+ typedef std::vector<SpaceSetMap*, GcAllocator<SpaceSetMap*> > SpaceSetMapVector;
bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d874c60..5a99f1b 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -51,7 +51,7 @@
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
public:
- typedef std::set<byte*, std::less<byte*>, GCAllocator<byte*> > CardSet;
+ typedef std::set<byte*, std::less<byte*>, GcAllocator<byte*> > CardSet;
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
@@ -125,7 +125,7 @@
// Maps from dirty cards to their corresponding alloc space references.
SafeMap<const byte*, std::vector<mirror::Object**>, std::less<const byte*>,
- GCAllocator<std::pair<const byte*, std::vector<mirror::Object**> > > > references_;
+ GcAllocator<std::pair<const byte*, std::vector<mirror::Object**> > > > references_;
};
// Card caching implementation. Keeps track of which cards we cleared and only this information.
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 63b24ff..52c02f7 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -62,9 +62,11 @@
CHECK(heap_begin != NULL);
// Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size, PROT_READ | PROT_WRITE));
- if (mem_map.get() == NULL) {
- LOG(ERROR) << "Failed to allocate bitmap " << name;
+ std::string error_msg;
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size,
+ PROT_READ | PROT_WRITE, &error_msg));
+ if (UNLIKELY(mem_map.get() == nullptr)) {
+ LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
return NULL;
}
return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 4cf8872..21709ad 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -212,7 +212,7 @@
public:
typedef std::set<
const mirror::Object*, std::less<const mirror::Object*>,
- GCAllocator<const mirror::Object*> > Objects;
+ GcAllocator<const mirror::Object*> > Objects;
bool IsEmpty() const {
return contained_.empty();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d26e28c..804c669 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -66,7 +66,7 @@
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
- double target_utilization, size_t capacity, const std::string& original_image_file_name,
+ double target_utilization, size_t capacity, const std::string& image_file_name,
bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
bool ignore_max_footprint)
@@ -144,9 +144,8 @@
// Requested begin for the alloc space, to follow the mapped image and oat files
byte* requested_alloc_space_begin = NULL;
- std::string image_file_name(original_image_file_name);
if (!image_file_name.empty()) {
- space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name);
+ space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
AddContinuousSpace(image_space);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 468d1d2..8c13d79 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -183,11 +183,12 @@
growth_limit = RoundUp(growth_limit, kPageSize);
capacity = RoundUp(capacity, kPageSize);
+ std::string error_msg;
UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
- PROT_READ | PROT_WRITE));
+ PROT_READ | PROT_WRITE, &error_msg));
if (mem_map.get() == NULL) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
- << PrettySize(capacity);
+ << PrettySize(capacity) << ": " << error_msg;
return NULL;
}
@@ -307,7 +308,10 @@
VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity, PROT_READ | PROT_WRITE));
+ std::string error_msg;
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity,
+ PROT_READ | PROT_WRITE, &error_msg));
+ CHECK(mem_map.get() != nullptr) << error_msg;
void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1cd33ee..fa28642 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -44,12 +44,13 @@
live_bitmap_.reset(live_bitmap);
}
-static bool GenerateImage(const std::string& image_file_name) {
+static bool GenerateImage(const std::string& image_file_name, std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
std::vector<std::string> boot_class_path;
Split(boot_class_path_string, ':', boot_class_path);
if (boot_class_path.empty()) {
- LOG(FATAL) << "Failed to generate image because no boot class path specified";
+ *error_msg = "Failed to generate image because no boot class path specified";
+ return false;
}
std::vector<std::string> arg_vector;
@@ -112,41 +113,57 @@
return false;
} else {
if (pid == -1) {
- PLOG(ERROR) << "fork failed";
+ *error_msg = StringPrintf("Failed to generate image '%s' because fork failed: %s",
+ image_file_name.c_str(), strerror(errno));
+ return false;
}
// wait for dex2oat to finish
int status;
pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
if (got_pid != pid) {
- PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
+ *error_msg = StringPrintf("Failed to generate image '%s' because waitpid failed: "
+ "wanted %d, got %d: %s",
+ image_file_name.c_str(), pid, got_pid, strerror(errno));
return false;
}
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- LOG(ERROR) << dex2oat << " failed: " << command_line;
+ *error_msg = StringPrintf("Failed to generate image '%s' because dex2oat failed: %s",
+ image_file_name.c_str(), command_line.c_str());
return false;
}
}
return true;
}
-ImageSpace* ImageSpace::Create(const std::string& original_image_file_name) {
- if (OS::FileExists(original_image_file_name.c_str())) {
+ImageSpace* ImageSpace::Create(const char* original_image_file_name) {
+ if (OS::FileExists(original_image_file_name)) {
// If the /system file exists, it should be up-to-date, so don't try to generate it
- return space::ImageSpace::Init(original_image_file_name, false);
+ std::string error_msg;
+ ImageSpace* space = ImageSpace::Init(original_image_file_name, false, &error_msg);
+ if (space == nullptr) {
+ LOG(FATAL) << "Failed to load image '" << original_image_file_name << "': " << error_msg;
+ }
+ return space;
}
// If the /system file didn't exist, we need to use one from the dalvik-cache.
// If the cache file exists, try to open, but if it fails, regenerate.
// If it does not exist, generate.
std::string image_file_name(GetDalvikCacheFilenameOrDie(original_image_file_name));
+ std::string error_msg;
if (OS::FileExists(image_file_name.c_str())) {
- space::ImageSpace* image_space = space::ImageSpace::Init(image_file_name, true);
- if (image_space != NULL) {
+ space::ImageSpace* image_space = ImageSpace::Init(image_file_name.c_str(), true, &error_msg);
+ if (image_space != nullptr) {
return image_space;
}
}
- CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name;
- return space::ImageSpace::Init(image_file_name, true);
+ CHECK(GenerateImage(image_file_name, &error_msg))
+ << "Failed to generate image '" << image_file_name << "': " << error_msg;
+ ImageSpace* space = ImageSpace::Init(image_file_name.c_str(), true, &error_msg);
+ if (space == nullptr) {
+ LOG(FATAL) << "Failed to load image '" << original_image_file_name << "': " << error_msg;
+ }
+ return space;
}
void ImageSpace::VerifyImageAllocations() {
@@ -160,8 +177,9 @@
}
}
-ImageSpace* ImageSpace::Init(const std::string& image_file_name, bool validate_oat_file) {
- CHECK(!image_file_name.empty());
+ImageSpace* ImageSpace::Init(const char* image_file_name, bool validate_oat_file,
+ std::string* error_msg) {
+ CHECK(image_file_name != nullptr);
uint64_t start_time = 0;
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -169,16 +187,16 @@
LOG(INFO) << "ImageSpace::Init entering image_file_name=" << image_file_name;
}
- UniquePtr<File> file(OS::OpenFileForReading(image_file_name.c_str()));
+ UniquePtr<File> file(OS::OpenFileForReading(image_file_name));
if (file.get() == NULL) {
- LOG(ERROR) << "Failed to open " << image_file_name;
- return NULL;
+ *error_msg = StringPrintf("Failed to open '%s'", image_file_name);
+ return nullptr;
}
ImageHeader image_header;
bool success = file->ReadFully(&image_header, sizeof(image_header));
if (!success || !image_header.IsValid()) {
- LOG(ERROR) << "Invalid image header " << image_file_name;
- return NULL;
+ *error_msg = StringPrintf("Invalid image header in '%s'", image_file_name);
+ return nullptr;
}
// Note: The image header is part of the image due to mmap page alignment required of offset.
@@ -188,10 +206,12 @@
MAP_PRIVATE | MAP_FIXED,
file->Fd(),
0,
- false));
+ false,
+ image_file_name,
+ error_msg));
if (map.get() == NULL) {
- LOG(ERROR) << "Failed to map " << image_file_name;
- return NULL;
+ DCHECK(!error_msg->empty());
+ return nullptr;
}
CHECK_EQ(image_header.GetImageBegin(), map->Begin());
DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
@@ -199,16 +219,24 @@
UniquePtr<MemMap> image_map(MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(),
PROT_READ, MAP_PRIVATE,
file->Fd(), image_header.GetBitmapOffset(),
- false));
- CHECK(image_map.get() != nullptr) << "failed to map image bitmap";
+ false,
+ image_file_name,
+ error_msg));
+ if (image_map.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
+ return nullptr;
+ }
size_t bitmap_index = bitmap_index_.fetch_add(1);
- std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name.c_str(),
+ std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_file_name,
bitmap_index));
UniquePtr<accounting::SpaceBitmap> bitmap(
accounting::SpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
reinterpret_cast<byte*>(map->Begin()),
map->Size()));
- CHECK(bitmap.get() != nullptr) << "could not create " << bitmap_name;
+ if (bitmap.get() == nullptr) {
+ *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
+ return nullptr;
+ }
Runtime* runtime = Runtime::Current();
mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
@@ -226,15 +254,15 @@
space->VerifyImageAllocations();
}
- space->oat_file_.reset(space->OpenOatFile());
- if (space->oat_file_.get() == NULL) {
- LOG(ERROR) << "Failed to open oat file for image: " << image_file_name;
- return NULL;
+ space->oat_file_.reset(space->OpenOatFile(error_msg));
+ if (space->oat_file_.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
}
- if (validate_oat_file && !space->ValidateOatFile()) {
- LOG(WARNING) << "Failed to validate oat file for image: " << image_file_name;
- return NULL;
+ if (validate_oat_file && !space->ValidateOatFile(error_msg)) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
}
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -244,7 +272,7 @@
return space.release();
}
-OatFile* ImageSpace::OpenOatFile() const {
+OatFile* ImageSpace::OpenOatFile(std::string* error_msg) const {
const Runtime* runtime = Runtime::Current();
const ImageHeader& image_header = GetImageHeader();
// Grab location but don't use Object::AsString as we haven't yet initialized the roots to
@@ -255,45 +283,47 @@
oat_filename += runtime->GetHostPrefix();
oat_filename += oat_location->ToModifiedUtf8();
OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
- !Runtime::Current()->IsCompiler());
+ !Runtime::Current()->IsCompiler(), error_msg);
if (oat_file == NULL) {
- LOG(ERROR) << "Failed to open oat file " << oat_filename << " referenced from image.";
- return NULL;
+ *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
+ oat_filename.c_str(), GetName(), error_msg->c_str());
+ return nullptr;
}
uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
uint32_t image_oat_checksum = image_header.GetOatChecksum();
if (oat_checksum != image_oat_checksum) {
- LOG(ERROR) << "Failed to match oat file checksum " << std::hex << oat_checksum
- << " to expected oat checksum " << std::hex << image_oat_checksum
- << " in image";
- return NULL;
+ *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum 0x%x"
+ " in image %s", oat_checksum, image_oat_checksum, GetName());
+ return nullptr;
}
return oat_file;
}
-bool ImageSpace::ValidateOatFile() const {
+bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
CHECK(oat_file_.get() != NULL);
for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
uint32_t dex_file_location_checksum;
- if (!DexFile::GetChecksum(dex_file_location.c_str(), &dex_file_location_checksum)) {
- LOG(WARNING) << "ValidateOatFile could not find checksum for " << dex_file_location;
+ if (!DexFile::GetChecksum(dex_file_location.c_str(), &dex_file_location_checksum, error_msg)) {
+ *error_msg = StringPrintf("Failed to get checksum of dex file '%s' referenced by image %s: "
+ "%s", dex_file_location.c_str(), GetName(), error_msg->c_str());
return false;
}
if (dex_file_location_checksum != oat_dex_file->GetDexFileLocationChecksum()) {
- LOG(WARNING) << "ValidateOatFile found checksum mismatch between oat file "
- << oat_file_->GetLocation() << " and dex file " << dex_file_location
- << " (" << oat_dex_file->GetDexFileLocationChecksum() << " != "
- << dex_file_location_checksum << ")";
+ *error_msg = StringPrintf("ValidateOatFile found checksum mismatch between oat file '%s' and "
+ "dex file '%s' (0x%x != 0x%x)",
+ oat_file_->GetLocation().c_str(), dex_file_location.c_str(),
+ oat_dex_file->GetDexFileLocationChecksum(),
+ dex_file_location_checksum);
return false;
}
}
return true;
}
-OatFile& ImageSpace::ReleaseOatFile() {
+OatFile* ImageSpace::ReleaseOatFile() {
CHECK(oat_file_.get() != NULL);
- return *oat_file_.release();
+ return oat_file_.release();
}
void ImageSpace::Dump(std::ostream& os) const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 381a98e..78a83c9 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -45,12 +45,11 @@
// creation of the alloc space. The ReleaseOatFile will later be
// used to transfer ownership of the OatFile to the ClassLinker when
// it is initialized.
- static ImageSpace* Create(const std::string& image)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static ImageSpace* Create(const char* image) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Releases the OatFile from the ImageSpace so it can be transferred to
// the caller, presumably the ClassLinker.
- OatFile& ReleaseOatFile()
+ OatFile* ReleaseOatFile()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VerifyImageAllocations()
@@ -84,13 +83,13 @@
// image's OatFile is up-to-date relative to its DexFile
// inputs. Otherwise (for /data), validate the inputs and generate
// the OatFile in /data/dalvik-cache if necessary.
- static ImageSpace* Init(const std::string& image, bool validate_oat_file)
+ static ImageSpace* Init(const char* image, bool validate_oat_file, std::string* error_msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- OatFile* OpenOatFile() const
+ OatFile* OpenOatFile(std::string* error_msg) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool ValidateOatFile() const
+ bool ValidateOatFile(std::string* error_msg) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class Space;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index c6d028e..1321b19 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -56,10 +56,13 @@
return new LargeObjectMapSpace(name);
}
-mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated) {
+ std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
- PROT_READ | PROT_WRITE);
- if (mem_map == NULL) {
+ PROT_READ | PROT_WRITE, &error_msg);
+ if (UNLIKELY(mem_map == NULL)) {
+ LOG(WARNING) << "Large object allocation failed: " << error_msg;
return NULL;
}
MutexLock mu(self, lock_);
@@ -129,9 +132,10 @@
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
CHECK_EQ(size % kAlignment, 0U);
+ std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
+ PROT_READ | PROT_WRITE, &error_msg);
+ CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 3f2e848..ef889d4 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,9 +96,9 @@
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<mirror::Object*,
- accounting::GCAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
+ accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
- accounting::GCAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
+ accounting::GcAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
MemMaps mem_maps_ GUARDED_BY(lock_);
};
@@ -217,7 +217,7 @@
AllocationHeader* GetAllocationHeader(const mirror::Object* obj);
typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
- accounting::GCAllocator<AllocationHeader*> > FreeBlocks;
+ accounting::GcAllocator<AllocationHeader*> > FreeBlocks;
byte* const begin_;
byte* const end_;
diff --git a/runtime/globals.h b/runtime/globals.h
index c397494..31574ff 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -30,30 +30,30 @@
const size_t MB = KB * KB;
const size_t GB = KB * KB * KB;
-const int kWordSize = sizeof(word);
-const int kPointerSize = sizeof(void*);
+const size_t kWordSize = sizeof(word);
+const size_t kPointerSize = sizeof(void*);
-const int kBitsPerByte = 8;
-const int kBitsPerByteLog2 = 3;
+const size_t kBitsPerByte = 8;
+const size_t kBitsPerByteLog2 = 3;
const int kBitsPerWord = kWordSize * kBitsPerByte;
-const int kWordHighBitMask = 1 << (kBitsPerWord - 1);
+const size_t kWordHighBitMask = static_cast<size_t>(1) << (kBitsPerWord - 1);
// Required stack alignment
-const int kStackAlignment = 16;
+const size_t kStackAlignment = 16;
// Required object alignment
-const int kObjectAlignment = 8;
+const size_t kObjectAlignment = 8;
// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
// but ARM ELF requires 8.
-const int kArmAlignment = 8;
+const size_t kArmAlignment = 8;
// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned.
// TODO: Can this be 4?
-const int kMipsAlignment = 8;
+const size_t kMipsAlignment = 8;
// X86 instruction alignment. This is the recommended alignment for maximum performance.
-const int kX86Alignment = 16;
+const size_t kX86Alignment = 16;
// System page size. We check this against sysconf(_SC_PAGE_SIZE) at runtime, but use a simple
// compile-time constant so the compiler can generate better code.
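
Aside: moving these constants from int to size_t keeps size and alignment arithmetic unsigned end to end, which is also why exception_test.cc now compares kStackAlignment against 16U. A small illustrative sketch (the names here are made up):

#include <cstddef>
#include <cstdint>

// An unsigned alignment constant compares and masks cleanly against other
// unsigned quantities (sizeof results, addresses), avoiding -Wsign-compare.
const size_t kToyStackAlignment = 16;

static_assert(kToyStackAlignment == 16U, "alignment expressed as an unsigned constant");

bool IsStackAligned(uintptr_t sp) {
  // Both operands are unsigned, so no implicit signed/unsigned conversion.
  return (sp & (kToyStackAlignment - 1)) == 0;
}

int main() {
  return IsStackAligned(0x1000) ? 0 : 1;
}
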
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 6451d5c..00316f7 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -67,7 +67,8 @@
static void CheckMapRequest(byte*, size_t) { }
#endif
-MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, int prot) {
+MemMap* MemMap::MapAnonymous(const char* name, byte* addr, size_t byte_count, int prot,
+ std::string* error_msg) {
if (byte_count == 0) {
return new MemMap(name, NULL, 0, NULL, 0, prot);
}
@@ -82,8 +83,8 @@
ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
int flags = MAP_PRIVATE;
if (fd.get() == -1) {
- PLOG(ERROR) << "ashmem_create_region failed (" << name << ")";
- return NULL;
+ *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
+ return nullptr;
}
#else
ScopedFd fd(-1);
@@ -94,16 +95,17 @@
if (actual == MAP_FAILED) {
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
- PLOG(ERROR) << "mmap(" << reinterpret_cast<void*>(addr) << ", " << page_aligned_byte_count
- << ", " << prot << ", " << flags << ", " << fd.get() << ", 0) failed for " << name
- << "\n" << maps;
- return NULL;
+ *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
+ addr, page_aligned_byte_count, prot, flags, fd.get(),
+ maps.c_str());
+ return nullptr;
}
return new MemMap(name, actual, byte_count, actual, page_aligned_byte_count, prot);
}
-MemMap* MemMap::MapFileAtAddress(byte* addr, size_t byte_count,
- int prot, int flags, int fd, off_t start, bool reuse) {
+MemMap* MemMap::MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
+ off_t start, bool reuse, const char* filename,
+ std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
if (byte_count == 0) {
@@ -133,10 +135,10 @@
if (actual == MAP_FAILED) {
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
- PLOG(ERROR) << "mmap(" << reinterpret_cast<void*>(page_aligned_addr)
- << ", " << page_aligned_byte_count
- << ", " << prot << ", " << flags << ", " << fd << ", " << page_aligned_offset
- << ") failed\n" << maps;
+ *error_msg = StringPrintf("mmap(%p, %zd, %x, %x, %d, %lld) of file '%s' failed\n%s",
+ page_aligned_addr, page_aligned_byte_count, prot, flags, fd,
+ static_cast<int64_t>(page_aligned_offset),
+ filename, maps.c_str());
return NULL;
}
return new MemMap("file", actual + page_offset, byte_count, actual, page_aligned_byte_count,
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index e294824..919463c 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -38,14 +38,16 @@
// a name.
//
// On success, returns a MemMap instance. On failure, returns NULL.
- static MemMap* MapAnonymous(const char* ashmem_name, byte* addr, size_t byte_count, int prot);
+ static MemMap* MapAnonymous(const char* ashmem_name, byte* addr, size_t byte_count, int prot,
+ std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
// On success, returns a MemMap instance. On failure, returns NULL.
- static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start) {
- return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false);
+ static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
+ const char* filename, std::string* error_msg) {
+ return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The
@@ -53,8 +55,9 @@
// requesting a specific address for the base of the mapping.
//
// On success, returns a MemMap instance. On failure, returns NULL.
- static MemMap* MapFileAtAddress(
- byte* addr, size_t byte_count, int prot, int flags, int fd, off_t start, bool reuse);
+ static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
+ off_t start, bool reuse, const char* filename,
+ std::string* error_msg);
// Releases the memory mapping
~MemMap();
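
Aside: the convention this patch adopts throughout is that fallible functions return nullptr and describe the failure through a std::string* out-parameter, and the caller decides whether to log, CHECK, or throw. A minimal sketch of that convention, using a hypothetical file-opening helper rather than any ART API:

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>

// On success, returns an open FILE*. On failure, returns nullptr and fills
// *error_msg with a human-readable reason; the callee never logs.
FILE* OpenForReading(const char* path, std::string* error_msg) {
  FILE* f = fopen(path, "rb");
  if (f == nullptr) {
    *error_msg = std::string("Failed to open '") + path + "': " + strerror(errno);
    return nullptr;
  }
  return f;
}

int main() {
  std::string error_msg;
  FILE* f = OpenForReading("/nonexistent/boot.art", &error_msg);
  if (f == nullptr) {
    // The caller chooses the severity; here we just print and bail out.
    std::cerr << error_msg << std::endl;
    return 1;
  }
  fclose(f);
  return 0;
}
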
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index dade01b..09de320 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -24,11 +24,14 @@
class MemMapTest : public testing::Test {};
TEST_F(MemMapTest, MapAnonymousEmpty) {
+ std::string error_msg;
UniquePtr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
NULL,
0,
- PROT_READ));
- ASSERT_TRUE(map.get() != NULL);
+ PROT_READ,
+ &error_msg));
+ ASSERT_TRUE(map.get() != NULL) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
}
} // namespace art
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 88cffb7..cd5e865 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -35,9 +35,7 @@
inline size_t Class::GetObjectSize() const {
DCHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this);
DCHECK_EQ(sizeof(size_t), sizeof(int32_t));
- size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false);
- DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(this);
- return result;
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false);
}
inline Class* Class::GetSuperClass() const {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index dbc6f57..d15f337 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -122,6 +122,7 @@
kStatusVerified = 7, // Logically part of linking; done pre-init.
kStatusInitializing = 8, // Class init in progress.
kStatusInitialized = 9, // Ready to go.
+ kStatusMax = 10,
};
Status GetStatus() const {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index e659108..e460a8d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -247,6 +247,7 @@
} else {
result = GetClass()->GetObjectSize();
}
+ DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(GetClass());
DCHECK(!IsArtField() || result == sizeof(ArtField));
DCHECK(!IsArtMethod() || result == sizeof(ArtMethod));
return result;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4e17b79..55a56d6 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -89,34 +89,32 @@
if (sourceName.c_str() == NULL) {
return 0;
}
- std::string dex_location(sourceName.c_str());
NullableScopedUtfChars outputName(env, javaOutputName);
if (env->ExceptionCheck()) {
return 0;
}
- ScopedObjectAccess soa(env);
+
uint32_t dex_location_checksum;
- if (!DexFile::GetChecksum(dex_location, &dex_location_checksum)) {
- LOG(WARNING) << "Failed to compute checksum: " << dex_location;
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/io/IOException;",
- "Unable to get checksum of dex file: %s", dex_location.c_str());
+ std::string error_msg;
+ if (!DexFile::GetChecksum(sourceName.c_str(), &dex_location_checksum, &error_msg)) {
+ ScopedObjectAccess soa(env);
+ DCHECK(!error_msg.empty());
+ ThrowIOException("%s", error_msg.c_str());
return 0;
}
ClassLinker* linker = Runtime::Current()->GetClassLinker();
const DexFile* dex_file;
- if (outputName.c_str() == NULL) {
- dex_file = linker->FindDexFileInOatFileFromDexLocation(dex_location, dex_location_checksum);
+ if (outputName.c_str() == nullptr) {
+ dex_file = linker->FindDexFileInOatFileFromDexLocation(sourceName.c_str(),
+ dex_location_checksum, &error_msg);
} else {
- std::string oat_location(outputName.c_str());
- dex_file = linker->FindOrCreateOatFileForDexLocation(dex_location, dex_location_checksum, oat_location);
+ dex_file = linker->FindOrCreateOatFileForDexLocation(sourceName.c_str(), dex_location_checksum,
+ outputName.c_str(), &error_msg);
}
- if (dex_file == NULL) {
- LOG(WARNING) << "Failed to open dex file: " << dex_location;
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/io/IOException;",
- "Unable to open dex file: %s", dex_location.c_str());
+ if (dex_file == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowIOException("%s", error_msg.c_str());
return 0;
}
return static_cast<jint>(reinterpret_cast<uintptr_t>(dex_file));
@@ -188,21 +186,17 @@
}
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
- bool debug_logging = false;
+ const bool kVerboseLogging = false; // Spammy logging.
+ const bool kDebugLogging = true; // Logging useful for debugging.
ScopedUtfChars filename(env, javaFilename);
- if (filename.c_str() == NULL) {
- LOG(ERROR) << "DexFile_isDexOptNeeded null filename";
- return JNI_TRUE;
- }
- if (!OS::FileExists(filename.c_str())) {
+ if ((filename.c_str() == nullptr) || !OS::FileExists(filename.c_str())) {
LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename.c_str() << "' does not exist";
- ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/io/FileNotFoundException;",
- "%s", filename.c_str());
- return JNI_TRUE;
+ ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
+ const char* message = (filename.c_str() == nullptr) ? "<empty file name>" : filename.c_str();
+ env->ThrowNew(fnfe.get(), message);
+ return JNI_FALSE;
}
// Always treat elements of the bootclasspath as up-to-date. The
@@ -212,7 +206,7 @@
const std::vector<const DexFile*>& boot_class_path = class_linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
if (boot_class_path[i]->GetLocation() == filename.c_str()) {
- if (debug_logging) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename.c_str();
}
return JNI_FALSE;
@@ -221,40 +215,55 @@
// Check if we have an odex file next to the dex file.
std::string odex_filename(OatFile::DexFilenameToOdexFilename(filename.c_str()));
- UniquePtr<const OatFile> oat_file(OatFile::Open(odex_filename, odex_filename, NULL, false));
- if (oat_file.get() != NULL) {
- ScopedObjectAccess soa(env);
- const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename.c_str(), NULL);
- if (oat_dex_file == NULL) {
- if (debug_logging) {
- LOG(INFO) << "DexFile_isDexOptNeeded GetOatDexFile failed";
- }
- } else {
+ std::string error_msg;
+ UniquePtr<const OatFile> oat_file(OatFile::Open(odex_filename, odex_filename, NULL, false,
+ &error_msg));
+ if (oat_file.get() == nullptr) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << filename.c_str()
+ << "': " << error_msg;
+ }
+ error_msg.clear();
+ } else {
+ const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename.c_str(), NULL,
+ kDebugLogging);
+ if (oat_dex_file != nullptr) {
uint32_t location_checksum;
- // If we have no classes.dex checksum such as in a user build, assume up-to-date.
- if (!DexFile::GetChecksum(filename.c_str(), &location_checksum)) {
- if (debug_logging) {
+ // If it's not possible to read the classes.dex, assume up-to-date as we won't be able to
+ // compile it anyway.
+ if (!DexFile::GetChecksum(filename.c_str(), &location_checksum, &error_msg)) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded ignoring precompiled stripped file: "
- << filename.c_str();
+ << filename.c_str() << ": " << error_msg;
}
return JNI_FALSE;
}
- if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum)) {
- if (debug_logging) {
+ if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum,
+ &error_msg)) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << odex_filename
- << " is up-to-date checksum compared to " << filename.c_str();
+ << " has an up-to-date checksum compared to " << filename.c_str();
}
return JNI_FALSE;
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded found precompiled file " << odex_filename
+ << " with an out-of-date checksum compared to " << filename.c_str()
+ << ": " << error_msg;
+ }
+ error_msg.clear();
}
}
}
// Check if we have an oat file in the cache
std::string cache_location(GetDalvikCacheFilenameOrDie(filename.c_str()));
- oat_file.reset(OatFile::Open(cache_location, filename.c_str(), NULL, false));
- if (oat_file.get() == NULL) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " does not exist for " << filename.c_str();
+ oat_file.reset(OatFile::Open(cache_location, filename.c_str(), NULL, false, &error_msg));
+ if (oat_file.get() == nullptr) {
+ if (kDebugLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
+ << " does not exist for " << filename.c_str() << ": " << error_msg;
+ }
return JNI_TRUE;
}
@@ -262,41 +271,53 @@
if (space->IsImageSpace()) {
// TODO: Ensure this works with multiple image spaces.
const ImageHeader& image_header = space->AsImageSpace()->GetImageHeader();
- if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() != image_header.GetOatChecksum()) {
- ScopedObjectAccess soa(env);
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " has out-of-date oat checksum compared to "
- << image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8();
+ if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() !=
+ image_header.GetOatChecksum()) {
+ if (kDebugLogging) {
+ ScopedObjectAccess soa(env);
+ LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
+ << " has out-of-date oat checksum compared to "
+ << image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8();
+ }
return JNI_TRUE;
}
if (oat_file->GetOatHeader().GetImageFileLocationOatDataBegin()
!= reinterpret_cast<uint32_t>(image_header.GetOatDataBegin())) {
- ScopedObjectAccess soa(env);
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " has out-of-date oat begin compared to "
- << image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8();
+ if (kDebugLogging) {
+ ScopedObjectAccess soa(env);
+ LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
+ << " has out-of-date oat begin compared to "
+ << image_header.GetImageRoot(ImageHeader::kOatLocation)->AsString()->ToModifiedUtf8();
+ }
return JNI_TRUE;
}
}
}
- ScopedObjectAccess soa(env);
uint32_t location_checksum;
- if (!DexFile::GetChecksum(filename.c_str(), &location_checksum)) {
- LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename.c_str();
+ if (!DexFile::GetChecksum(filename.c_str(), &location_checksum, &error_msg)) {
+ if (kDebugLogging) {
+ LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename.c_str()
+ << " (error " << error_msg << ")";
+ }
return JNI_TRUE;
}
- if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum)) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " has out-of-date checksum compared to " << filename.c_str();
+ if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum,
+ &error_msg)) {
+ if (kDebugLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
+ << " has out-of-date checksum compared to " << filename.c_str()
+ << " (error " << error_msg << ")";
+ }
return JNI_TRUE;
}
- if (debug_logging) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
<< " is up-to-date for " << filename.c_str();
}
+ CHECK(error_msg.empty()) << error_msg;
return JNI_FALSE;
}
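
Taken together, the rewritten DexFile_isDexOptNeeded reduces to a short decision chain: boot-classpath entries are never re-optimized, a precompiled .odex with a matching checksum short-circuits the check, and otherwise the cached oat file must exist and match both the boot image and the dex file's checksum. A purely illustrative condensation of that flow (the real code consults the class linker, OatFile and ImageHeader rather than taking booleans):

    // Hypothetical summary of the checks above; parameter names are illustrative.
    static bool IsDexOptNeededSketch(bool in_boot_class_path,
                                     bool odex_checksum_matches,
                                     bool cache_oat_exists,
                                     bool image_checksums_match,
                                     bool dex_checksum_matches) {
      if (in_boot_class_path) return false;     // boot classpath is always treated as up-to-date
      if (odex_checksum_matches) return false;  // a current .odex next to the dex suffices
      if (!cache_oat_exists) return true;       // nothing usable in the dalvik-cache yet
      if (!image_checksums_match) return true;  // cache oat was built against a different image
      return !dex_checksum_matches;             // finally compare against the dex file itself
    }
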
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 486328c..4629dbd 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -135,7 +135,7 @@
return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so");
}
-static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVersion) {
+static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) {
// This is the target SDK version of the app we're about to run.
// Note that targetSdkVersion may be CUR_DEVELOPMENT (10000).
// Note that targetSdkVersion may be 0, meaning "current".
@@ -519,7 +519,7 @@
NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"),
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
- NATIVE_METHOD(VMRuntime, setTargetSdkVersion, "(I)V"),
+ NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
NATIVE_METHOD(VMRuntime, startJitCompilation, "()V"),
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index c23b08c..af1b548 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -72,8 +72,10 @@
}
const DexFile* dex_file = path[index];
const std::string& location(dex_file->GetLocation());
- UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(location));
- if (zip_archive.get() == NULL) {
+ std::string error_msg;
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(location.c_str(), &error_msg));
+ if (zip_archive.get() == nullptr) {
+ LOG(WARNING) << "Failed to open zip archive '" << location << "': " << error_msg;
return NULL;
}
UniquePtr<ZipEntry> zip_entry(zip_archive->Find(name.c_str()));
diff --git a/runtime/oat.cc b/runtime/oat.cc
index c01f77c..6fe5d10 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '8', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
diff --git a/runtime/oat.h b/runtime/oat.h
index a653cf8..a9dc540 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -104,6 +104,19 @@
DISALLOW_COPY_AND_ASSIGN(OatHeader);
};
+// OatMethodOffsets are currently 7x32-bits=224-bits long, so if we can
+// save even one OatMethodOffsets struct, the more complicated encoding
+// using a bitmap pays for itself, since few classes will have more than
+// 224 methods.
+enum OatClassType {
+ kOatClassAllCompiled = 0, // OatClass is followed by an OatMethodOffsets for each method.
+ kOatClassSomeCompiled = 1, // A bitmap of which OatMethodOffsets are present follows the OatClass.
+ kOatClassNoneCompiled = 2, // All methods are interpreted so no OatMethodOffsets are necessary.
+ kOatClassMax = 3,
+};
+
+std::ostream& operator<<(std::ostream& os, const OatClassType& rhs);
+
class PACKED(4) OatMethodOffsets {
public:
OatMethodOffsets();
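
Each OatClass record now begins with a 16-bit class status and a 16-bit OatClassType; only kOatClassSomeCompiled adds a 32-bit bitmap byte count and the bitmap itself ahead of the OatMethodOffsets array. A minimal sketch of that layout, written against the parsing in OatDexFile::GetOatClass below and omitting the bounds checks the real code performs:

    // Illustrative walk over one OatClass record; returns the start of the
    // OatMethodOffsets array. Mirrors OatDexFile::GetOatClass in oat_file.cc.
    static const byte* SkipToMethodOffsets(const byte* oat_class_pointer) {
      OatClassType type = static_cast<OatClassType>(
          *reinterpret_cast<const uint16_t*>(oat_class_pointer + sizeof(uint16_t)));
      const byte* bitmap_pointer = oat_class_pointer + 2 * sizeof(uint16_t);
      uint32_t bitmap_size = 0;  // in bytes; present only for kOatClassSomeCompiled
      if (type == kOatClassSomeCompiled) {
        bitmap_size = *reinterpret_cast<const uint32_t*>(bitmap_pointer);
        bitmap_pointer += sizeof(bitmap_size);
      }
      return bitmap_pointer + bitmap_size;
    }
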
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7ecaf01..fa2b485 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,6 +18,7 @@
#include <dlfcn.h>
+#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
@@ -48,19 +49,21 @@
}
OatFile* OatFile::OpenMemory(std::vector<uint8_t>& oat_contents,
- const std::string& location) {
+ const std::string& location,
+ std::string* error_msg) {
CHECK(!oat_contents.empty()) << location;
CheckLocation(location);
UniquePtr<OatFile> oat_file(new OatFile(location));
oat_file->begin_ = &oat_contents[0];
oat_file->end_ = &oat_contents[oat_contents.size()];
- return oat_file->Setup() ? oat_file.release() : NULL;
+ return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
}
OatFile* OatFile::Open(const std::string& filename,
const std::string& location,
byte* requested_base,
- bool executable) {
+ bool executable,
+ std::string* error_msg) {
CHECK(!filename.empty()) << location;
CheckLocation(filename);
#ifdef ART_USE_PORTABLE_COMPILER
@@ -70,7 +73,7 @@
// open a generated dex file by name, remove the file, then open
// another generated dex file with the same name. http://b/10614658
if (executable) {
- return OpenDlopen(filename, location, requested_base);
+ return OpenDlopen(filename, location, requested_base, error_msg);
}
#endif
// If we aren't trying to execute, we just use our own ElfFile loader for a couple reasons:
@@ -83,21 +86,22 @@
if (file.get() == NULL) {
return NULL;
}
- return OpenElfFile(file.get(), location, requested_base, false, executable);
+ return OpenElfFile(file.get(), location, requested_base, false, executable, error_msg);
}
-OatFile* OatFile::OpenWritable(File* file, const std::string& location) {
+OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
CheckLocation(location);
- return OpenElfFile(file, location, NULL, true, false);
+ return OpenElfFile(file, location, NULL, true, false, error_msg);
}
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
- byte* requested_base) {
+ byte* requested_base,
+ std::string* error_msg) {
UniquePtr<OatFile> oat_file(new OatFile(location));
- bool success = oat_file->Dlopen(elf_filename, requested_base);
+ bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
if (!success) {
- return NULL;
+ return nullptr;
}
return oat_file.release();
}
@@ -106,11 +110,13 @@
const std::string& location,
byte* requested_base,
bool writable,
- bool executable) {
+ bool executable,
+ std::string* error_msg) {
UniquePtr<OatFile> oat_file(new OatFile(location));
- bool success = oat_file->ElfFileOpen(file, requested_base, writable, executable);
+ bool success = oat_file->ElfFileOpen(file, requested_base, writable, executable, error_msg);
if (!success) {
- return NULL;
+ CHECK(!error_msg->empty());
+ return nullptr;
}
return oat_file.release();
}
@@ -127,120 +133,117 @@
}
}
-bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) {
+bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base,
+ std::string* error_msg) {
char* absolute_path = realpath(elf_filename.c_str(), NULL);
if (absolute_path == NULL) {
- VLOG(class_linker) << "Failed to find absolute path for " << elf_filename;
+ *error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str());
return false;
}
dlopen_handle_ = dlopen(absolute_path, RTLD_NOW);
free(absolute_path);
if (dlopen_handle_ == NULL) {
- VLOG(class_linker) << "Failed to dlopen " << elf_filename << ": " << dlerror();
+ *error_msg = StringPrintf("Failed to dlopen '%s': %s", elf_filename.c_str(), dlerror());
return false;
}
begin_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatdata"));
if (begin_ == NULL) {
- LOG(WARNING) << "Failed to find oatdata symbol in " << elf_filename << ": " << dlerror();
+ *error_msg = StringPrintf("Failed to find oatdata symbol in '%s': %s", elf_filename.c_str(),
+ dlerror());
return false;
}
if (requested_base != NULL && begin_ != requested_base) {
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
- LOG(WARNING) << "Failed to find oatdata symbol at expected address: oatdata="
- << reinterpret_cast<const void*>(begin_) << " != expected="
- << reinterpret_cast<const void*>(requested_base)
- << " /proc/self/maps:\n" << maps;
+ *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
+ "oatdata=%p != expected=%p /proc/self/maps:\n",
+ begin_, requested_base);
+ ReadFileToString("/proc/self/maps", error_msg);
return false;
}
end_ = reinterpret_cast<byte*>(dlsym(dlopen_handle_, "oatlastword"));
if (end_ == NULL) {
- LOG(WARNING) << "Failed to find oatlastword symbol in " << elf_filename << ": " << dlerror();
+ *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s': %s", elf_filename.c_str(),
+ dlerror());
return false;
}
// Readjust to be non-inclusive upper bound.
end_ += sizeof(uint32_t);
- return Setup();
+ return Setup(error_msg);
}
-bool OatFile::ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable) {
- elf_file_.reset(ElfFile::Open(file, writable, true));
- if (elf_file_.get() == NULL) {
- if (writable) {
- PLOG(WARNING) << "Failed to open ELF file for " << file->GetPath();
- }
+bool OatFile::ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+ std::string* error_msg) {
+ elf_file_.reset(ElfFile::Open(file, writable, true, error_msg));
+ if (elf_file_.get() == nullptr) {
+ DCHECK(!error_msg->empty());
return false;
}
- bool loaded = elf_file_->Load(executable);
+ bool loaded = elf_file_->Load(executable, error_msg);
if (!loaded) {
- LOG(WARNING) << "Failed to load ELF file " << file->GetPath();
+ DCHECK(!error_msg->empty());
return false;
}
begin_ = elf_file_->FindDynamicSymbolAddress("oatdata");
if (begin_ == NULL) {
- LOG(WARNING) << "Failed to find oatdata symbol in " << file->GetPath();
+ *error_msg = StringPrintf("Failed to find oatdata symbol in '%s'", file->GetPath().c_str());
return false;
}
if (requested_base != NULL && begin_ != requested_base) {
- std::string maps;
- ReadFileToString("/proc/self/maps", &maps);
- LOG(WARNING) << "Failed to find oatdata symbol at expected address: oatdata="
- << reinterpret_cast<const void*>(begin_) << " != expected="
- << reinterpret_cast<const void*>(requested_base)
- << " /proc/self/maps:\n" << maps;
+ *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
+ "oatdata=%p != expected=%p /proc/self/maps:\n",
+ begin_, requested_base);
+ ReadFileToString("/proc/self/maps", error_msg);
return false;
}
end_ = elf_file_->FindDynamicSymbolAddress("oatlastword");
if (end_ == NULL) {
- LOG(WARNING) << "Failed to find oatlastword symbol in " << file->GetPath();
+ *error_msg = StringPrintf("Failed to find oatlastword symbol in '%s'", file->GetPath().c_str());
return false;
}
// Readjust to be non-inclusive upper bound.
end_ += sizeof(uint32_t);
- return Setup();
+ return Setup(error_msg);
}
-bool OatFile::Setup() {
+bool OatFile::Setup(std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
- LOG(WARNING) << "Invalid oat magic for " << GetLocation();
+ *error_msg = StringPrintf("Invalid oat magic for '%s'", GetLocation().c_str());
return false;
}
const byte* oat = Begin();
oat += sizeof(OatHeader);
if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found truncated OatHeader";
+ *error_msg = StringPrintf("In oat file '%s' found truncated OatHeader", GetLocation().c_str());
return false;
}
oat += GetOatHeader().GetImageFileLocationSize();
if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found truncated image file location: "
- << reinterpret_cast<const void*>(Begin())
- << "+" << sizeof(OatHeader)
- << "+" << GetOatHeader().GetImageFileLocationSize()
- << "<=" << reinterpret_cast<const void*>(End());
+ *error_msg = StringPrintf("In oat file '%s' found truncated image file location: "
+ "%p + %zd + %ud <= %p", GetLocation().c_str(),
+ Begin(), sizeof(OatHeader), GetOatHeader().GetImageFileLocationSize(),
+ End());
return false;
}
for (size_t i = 0; i < GetOatHeader().GetDexFileCount(); i++) {
size_t dex_file_location_size = *reinterpret_cast<const uint32_t*>(oat);
- if (dex_file_location_size == 0U) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " with empty location name";
+ if (UNLIKELY(dex_file_location_size == 0U)) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with empty location name",
+ GetLocation().c_str(), i);
return false;
}
oat += sizeof(dex_file_location_size);
- if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " truncated after dex file location size";
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd truncated after dex file "
+ "location size", GetLocation().c_str(), i);
return false;
}
const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
oat += dex_file_location_size;
- if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " with truncated dex file location";
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with truncated dex file "
+ "location", GetLocation().c_str(), i);
return false;
}
@@ -248,55 +251,54 @@
uint32_t dex_file_checksum = *reinterpret_cast<const uint32_t*>(oat);
oat += sizeof(dex_file_checksum);
- if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " truncated after dex file checksum";
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated after "
+ "dex file checksum", GetLocation().c_str(), i,
+ dex_file_location.c_str());
return false;
}
uint32_t dex_file_offset = *reinterpret_cast<const uint32_t*>(oat);
- if (dex_file_offset == 0U) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " with zero dex file offset";
+ if (UNLIKELY(dex_file_offset == 0U)) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with zero dex "
+ "file offset", GetLocation().c_str(), i, dex_file_location.c_str());
return false;
}
- if (dex_file_offset > Size()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " with dex file offset" << dex_file_offset << " > " << Size();
+ if (UNLIKELY(dex_file_offset > Size())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with dex file "
+ "offset %ud > %zd", GetLocation().c_str(), i,
+ dex_file_location.c_str(), dex_file_offset, Size());
return false;
}
oat += sizeof(dex_file_offset);
- if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " truncated after dex file offset";
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
+ " after dex file offsets", GetLocation().c_str(), i,
+ dex_file_location.c_str());
return false;
}
const uint8_t* dex_file_pointer = Begin() + dex_file_offset;
- if (!DexFile::IsMagicValid(dex_file_pointer)) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " with invalid dex file magic: " << dex_file_pointer;
+ if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
+ " dex file magic '%s'", GetLocation().c_str(), i,
+ dex_file_location.c_str(), dex_file_pointer);
return false;
}
- if (!DexFile::IsVersionValid(dex_file_pointer)) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " with invalid dex file version: " << dex_file_pointer;
+ if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
+ " dex file version '%s'", GetLocation().c_str(), i,
+ dex_file_location.c_str(), dex_file_pointer);
return false;
}
const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
const uint32_t* methods_offsets_pointer = reinterpret_cast<const uint32_t*>(oat);
oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
- if (oat > End()) {
- LOG(ERROR) << "In oat file " << GetLocation() << " found OatDexFile # " << i
- << " for "<< dex_file_location
- << " with truncated method offsets";
+ if (UNLIKELY(oat > End())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
+ " method offsets", GetLocation().c_str(), i,
+ dex_file_location.c_str());
return false;
}
@@ -323,8 +325,8 @@
return end_;
}
-const OatFile::OatDexFile* OatFile::GetOatDexFile(const std::string& dex_location,
- const uint32_t* const dex_location_checksum,
+const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
+ const uint32_t* dex_location_checksum,
bool warn_if_not_found) const {
Table::const_iterator it = oat_dex_files_.find(dex_location);
if (it != oat_dex_files_.end()) {
@@ -373,9 +375,9 @@
return reinterpret_cast<const DexFile::Header*>(dex_file_pointer_)->file_size_;
}
-const DexFile* OatFile::OatDexFile::OpenDexFile() const {
+const DexFile* OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
return DexFile::Open(dex_file_pointer_, FileSize(), dex_file_location_,
- dex_file_location_checksum_);
+ dex_file_location_checksum_, error_msg);
}
const OatFile::OatClass* OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
@@ -383,29 +385,92 @@
const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
- mirror::Class::Status status = *reinterpret_cast<const mirror::Class::Status*>(oat_class_pointer);
- const byte* methods_pointer = oat_class_pointer + sizeof(status);
+ const byte* status_pointer = oat_class_pointer;
+ CHECK_LT(status_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ mirror::Class::Status status =
+ static_cast<mirror::Class::Status>(*reinterpret_cast<const int16_t*>(status_pointer));
+ CHECK_LT(status, mirror::Class::kStatusMax);
+
+ const byte* type_pointer = status_pointer + sizeof(uint16_t);
+ CHECK_LT(type_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ OatClassType type = static_cast<OatClassType>(*reinterpret_cast<const uint16_t*>(type_pointer));
+ CHECK_LT(type, kOatClassMax);
+
+ const byte* bitmap_pointer = type_pointer + sizeof(int16_t);
+ CHECK_LT(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ uint32_t bitmap_size = 0;
+ if (type == kOatClassSomeCompiled) {
+ bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(bitmap_pointer));
+ bitmap_pointer += sizeof(bitmap_size);
+ CHECK_LT(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ }
+
+ const byte* methods_pointer = bitmap_pointer + bitmap_size;
CHECK_LT(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
return new OatClass(oat_file_,
status,
+ type,
+ bitmap_size,
+ reinterpret_cast<const uint32_t*>(bitmap_pointer),
reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
}
OatFile::OatClass::OatClass(const OatFile* oat_file,
mirror::Class::Status status,
+ OatClassType type,
+ uint32_t bitmap_size,
+ const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer)
- : oat_file_(oat_file), status_(status), methods_pointer_(methods_pointer) {}
+ : oat_file_(oat_file), status_(status), type_(type),
+ bitmap_(NULL), methods_pointer_(methods_pointer) {
+ switch (type_) {
+ case kOatClassAllCompiled: {
+ CHECK_EQ(0U, bitmap_size);
+ break;
+ }
+ case kOatClassSomeCompiled: {
+ CHECK_NE(0U, bitmap_size);
+ bitmap_ = new BitVector(0, false, Allocator::GetNoopAllocator(), bitmap_size,
+ const_cast<uint32_t*>(bitmap_pointer));
+ break;
+ }
+ case kOatClassNoneCompiled: {
+ CHECK_EQ(0U, bitmap_size);
+ methods_pointer_ = NULL;
+ break;
+ }
+ case kOatClassMax: {
+ LOG(FATAL) << "Invalid OatClassType " << type_;
+ break;
+ }
+ }
+}
-OatFile::OatClass::~OatClass() {}
-
-mirror::Class::Status OatFile::OatClass::GetStatus() const {
- return status_;
+OatFile::OatClass::~OatClass() {
+ delete bitmap_;
}
const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
- const OatMethodOffsets& oat_method_offsets = methods_pointer_[method_index];
+ if (methods_pointer_ == NULL) {
+ CHECK_EQ(kOatClassNoneCompiled, type_);
+ return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+ }
+ size_t methods_pointer_index;
+ if (bitmap_ == NULL) {
+ CHECK_EQ(kOatClassAllCompiled, type_);
+ methods_pointer_index = method_index;
+ } else {
+ CHECK_EQ(kOatClassSomeCompiled, type_);
+ if (!bitmap_->IsBitSet(method_index)) {
+ return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+ }
+ size_t num_set_bits = bitmap_->NumSetBits(method_index);
+ CHECK_NE(0U, num_set_bits);
+ methods_pointer_index = num_set_bits - 1;
+ }
+ const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
return OatMethod(
oat_file_->Begin(),
oat_method_offsets.code_offset_,
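
For a kOatClassSomeCompiled class, GetOatMethod above maps a class-local method index onto the compacted OatMethodOffsets array by counting set bits in the bitmap: methods whose bit is clear get an all-zero OatMethod, and a compiled method's slot is the number of compiled methods that precede it. A self-contained illustration over a single 32-bit bitmap word (the real code goes through BitVector::IsBitSet and NumSetBits; method_index is assumed to be below 32 here):

    // Returns true and the OatMethodOffsets slot if the method was compiled.
    static bool CompiledSlotIndex(uint32_t bitmap, uint32_t method_index, size_t* slot) {
      if ((bitmap & (1u << method_index)) == 0) {
        return false;  // not compiled: caller hands out an all-zero OatMethod
      }
      // Set bits strictly below method_index == number of compiled methods before it.
      *slot = __builtin_popcount(bitmap & ((1u << method_index) - 1u));
      return true;
    }

With bitmap 0b101101 (methods 0, 2, 3 and 5 compiled), method 3 lands in slot 2 and method 4 reports not compiled, matching the behavior of GetOatMethod above.
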
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 270976f..887a9d1 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,6 +29,7 @@
namespace art {
+class BitVector;
class ElfFile;
class MemMap;
class OatMethodOffsets;
@@ -45,18 +46,20 @@
static OatFile* Open(const std::string& filename,
const std::string& location,
byte* requested_base,
- bool executable);
+ bool executable,
+ std::string* error_msg);
// Open an oat file from an already opened File.
// Does not use dlopen underneath so cannot be used for runtime use
// where relocations may be required. Currently used from
// ImageWriter which wants to open a writable version from an existing
// file descriptor for patching.
- static OatFile* OpenWritable(File* file, const std::string& location);
+ static OatFile* OpenWritable(File* file, const std::string& location, std::string* error_msg);
// Open an oat file backed by a std::vector with the given location.
static OatFile* OpenMemory(std::vector<uint8_t>& oat_contents,
- const std::string& location);
+ const std::string& location,
+ std::string* error_msg);
~OatFile();
@@ -143,7 +146,13 @@
class OatClass {
public:
- mirror::Class::Status GetStatus() const;
+ mirror::Class::Status GetStatus() const {
+ return status_;
+ }
+
+ OatClassType GetType() const {
+ return type_;
+ }
// get the OatMethod entry based on its index into the class
// definition. direct methods come first, followed by virtual
@@ -155,10 +164,21 @@
private:
OatClass(const OatFile* oat_file,
mirror::Class::Status status,
+ OatClassType type,
+ uint32_t bitmap_size,
+ const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer);
const OatFile* oat_file_;
+
const mirror::Class::Status status_;
+ COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (1 << 16), class_status_wont_fit_in_16bits);
+
+ OatClassType type_;
+ COMPILE_ASSERT(OatClassType::kOatClassMax < (1 << 16), oat_class_type_wont_fit_in_16bits);
+
+ const BitVector* bitmap_;
+
const OatMethodOffsets* methods_pointer_;
friend class OatDexFile;
@@ -167,7 +187,7 @@
class OatDexFile {
public:
// Opens the DexFile referred to by this OatDexFile from within the containing OatFile.
- const DexFile* OpenDexFile() const;
+ const DexFile* OpenDexFile(std::string* error_msg) const;
// Returns the size of the DexFile referred to by this OatDexFile.
size_t FileSize() const;
@@ -204,10 +224,10 @@
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
};
- const OatDexFile* GetOatDexFile(const std::string& dex_location,
+ const OatDexFile* GetOatDexFile(const char* dex_location,
const uint32_t* const dex_location_checksum,
- bool exception_if_not_found = true) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool exception_if_not_found = true) const;
+
std::vector<const OatDexFile*> GetOatDexFiles() const;
size_t Size() const {
@@ -219,18 +239,21 @@
static OatFile* OpenDlopen(const std::string& elf_filename,
const std::string& location,
- byte* requested_base);
+ byte* requested_base,
+ std::string* error_msg);
static OatFile* OpenElfFile(File* file,
const std::string& location,
byte* requested_base,
bool writable,
- bool executable);
+ bool executable,
+ std::string* error_msg);
explicit OatFile(const std::string& filename);
- bool Dlopen(const std::string& elf_filename, byte* requested_base);
- bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable);
- bool Setup();
+ bool Dlopen(const std::string& elf_filename, byte* requested_base, std::string* error_msg);
+ bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
+ std::string* error_msg);
+ bool Setup(std::string* error_msg);
const byte* Begin() const;
const byte* End() const;
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 8062a89..3ca3c0b 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -700,6 +700,46 @@
return s;
}
+ uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile& dexfile = GetDexFile();
+ if (&dexfile == &other_dexfile) {
+ return method_->GetDexMethodIndex();
+ }
+ const DexFile::MethodId& mid = dexfile.GetMethodId(method_->GetDexMethodIndex());
+ const char* mid_declaring_class_descriptor = dexfile.StringByTypeIdx(mid.class_idx_);
+ const DexFile::StringId* other_descriptor =
+ other_dexfile.FindStringId(mid_declaring_class_descriptor);
+ if (other_descriptor != nullptr) {
+ const DexFile::TypeId* other_type_id =
+ other_dexfile.FindTypeId(other_dexfile.GetIndexForStringId(*other_descriptor));
+ if (other_type_id != nullptr) {
+ const char* mid_name = dexfile.GetMethodName(mid);
+ const DexFile::StringId* other_name = other_dexfile.FindStringId(mid_name);
+ if (other_name != nullptr) {
+ uint16_t other_return_type_idx;
+ std::vector<uint16_t> other_param_type_idxs;
+ bool success = other_dexfile.CreateTypeList(dexfile.GetMethodSignature(mid).ToString(),
+ &other_return_type_idx,
+ &other_param_type_idxs);
+ if (success) {
+ const DexFile::ProtoId* other_sig =
+ other_dexfile.FindProtoId(other_return_type_idx, other_param_type_idxs);
+ if (other_sig != nullptr) {
+ const DexFile::MethodId* other_mid = other_dexfile.FindMethodId(*other_type_id,
+ *other_name,
+ *other_sig);
+ if (other_mid != nullptr) {
+ return other_dexfile.GetIndexForMethodId(*other_mid);
+ }
+ }
+ }
+ }
+ }
+ }
+ return DexFile::kDexNoIndex;
+ }
+
private:
// Set the method_ field, for proxy methods looking up the interface method via the resolved
// methods table.
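
FindDexMethodIndexInOtherDexFile asks whether the current method is also referenced from some other dex file by chasing its declaring-class descriptor, name and signature through that file's string, type, proto and method-id tables; kDexNoIndex means no such reference exists. A hedged usage sketch (variable names are illustrative and not taken from the patch):

    // Hypothetical caller: translate a callee's method index into the dex file of the
    // method that inlines it, so the result can be encoded against that dex file.
    MethodHelper mh(callee_method);
    uint32_t index_in_caller = mh.FindDexMethodIndexInOtherDexFile(caller_dex_file);
    if (index_in_caller == DexFile::kDexNoIndex) {
      // The callee is not referenced from caller_dex_file; fall back to a runtime lookup.
    }
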
diff --git a/runtime/utils.cc b/runtime/utils.cc
index e2852a6..f9e4ebe 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -718,9 +718,9 @@
* this function returns false, then the given pointer may only have
* been partially advanced.
*/
-bool IsValidPartOfMemberNameUtf8(const char** pUtf8Ptr) {
+static bool IsValidPartOfMemberNameUtf8(const char** pUtf8Ptr) {
uint8_t c = (uint8_t) **pUtf8Ptr;
- if (c <= 0x7f) {
+ if (LIKELY(c <= 0x7f)) {
// It's low-ascii, so check the table.
uint32_t wordIdx = c >> 5;
uint32_t bitIdx = c & 0x1f;
@@ -761,7 +761,7 @@
}
enum ClassNameType { kName, kDescriptor };
-bool IsValidClassName(const char* s, ClassNameType type, char separator) {
+static bool IsValidClassName(const char* s, ClassNameType type, char separator) {
int arrayCount = 0;
while (*s == '[') {
arrayCount++;
@@ -1194,12 +1194,12 @@
return dalvik_cache;
}
-std::string GetDalvikCacheFilenameOrDie(const std::string& location) {
+std::string GetDalvikCacheFilenameOrDie(const char* location) {
std::string dalvik_cache(GetDalvikCacheOrDie(GetAndroidData()));
if (location[0] != '/') {
LOG(FATAL) << "Expected path in location to be absolute: "<< location;
}
- std::string cache_file(location, 1); // skip leading slash
+ std::string cache_file(&location[1]); // skip leading slash
if (!EndsWith(location, ".dex") && !EndsWith(location, ".art")) {
cache_file += "/";
cache_file += DexFile::kClassesDex;
diff --git a/runtime/utils.h b/runtime/utils.h
index 975f08b..0174b37 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -351,7 +351,7 @@
std::string GetDalvikCacheOrDie(const char* android_data);
// Returns the dalvik-cache location for a DexFile or OatFile, or dies trying.
-std::string GetDalvikCacheFilenameOrDie(const std::string& location);
+std::string GetDalvikCacheFilenameOrDie(const char* location);
// Check whether the given magic matches a known file type.
bool IsZipMagic(uint32_t magic);
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index 8e09e78..db273ec 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -19,10 +19,12 @@
#include <vector>
#include <fcntl.h>
+#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include "base/stringprintf.h"
#include "base/unix_file/fd_file.h"
#include "UniquePtr.h"
@@ -247,35 +249,38 @@
return true;
}
-bool ZipEntry::ExtractToFile(File& file) {
+bool ZipEntry::ExtractToFile(File& file, std::string* error_msg) {
uint32_t length = GetUncompressedLength();
int result = TEMP_FAILURE_RETRY(ftruncate(file.Fd(), length));
if (result == -1) {
- PLOG(WARNING) << "Zip: failed to ftruncate " << file.GetPath() << " to length " << length;
+ *error_msg = StringPrintf("Zip: failed to ftruncate '%s' to length %ud", file.GetPath().c_str(),
+ length);
return false;
}
- UniquePtr<MemMap> map(MemMap::MapFile(length, PROT_READ | PROT_WRITE, MAP_SHARED, file.Fd(), 0));
+ UniquePtr<MemMap> map(MemMap::MapFile(length, PROT_READ | PROT_WRITE, MAP_SHARED, file.Fd(), 0,
+ file.GetPath().c_str(), error_msg));
if (map.get() == NULL) {
- LOG(WARNING) << "Zip: failed to mmap space for " << file.GetPath();
+ *error_msg = StringPrintf("Zip: failed to mmap space for '%s': %s", file.GetPath().c_str(),
+ error_msg->c_str());
return false;
}
- return ExtractToMemory(map->Begin(), map->Size());
+ return ExtractToMemory(map->Begin(), map->Size(), error_msg);
}
-bool ZipEntry::ExtractToMemory(uint8_t* begin, size_t size) {
+bool ZipEntry::ExtractToMemory(uint8_t* begin, size_t size, std::string* error_msg) {
// If size is zero, data offset will be meaningless, so bail out early.
if (size == 0) {
return true;
}
off64_t data_offset = GetDataOffset();
if (data_offset == -1) {
- LOG(WARNING) << "Zip: data_offset=" << data_offset;
+ *error_msg = StringPrintf("Zip: data_offset=%lld", data_offset);
return false;
}
if (lseek64(zip_archive_->fd_, data_offset, SEEK_SET) != data_offset) {
- PLOG(WARNING) << "Zip: lseek to data at " << data_offset << " failed";
+ *error_msg = StringPrintf("Zip: lseek to data at %lld failed", data_offset);
return false;
}
@@ -288,25 +293,25 @@
return InflateToMemory(begin, size, zip_archive_->fd_,
GetUncompressedLength(), GetCompressedLength());
default:
- LOG(WARNING) << "Zip: unknown compression method " << std::hex << GetCompressionMethod();
+ *error_msg = StringPrintf("Zip: unknown compression method 0x%x", GetCompressionMethod());
return false;
}
}
-MemMap* ZipEntry::ExtractToMemMap(const char* entry_filename) {
+MemMap* ZipEntry::ExtractToMemMap(const char* entry_filename, std::string* error_msg) {
std::string name(entry_filename);
name += " extracted in memory from ";
name += entry_filename;
UniquePtr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
NULL,
GetUncompressedLength(),
- PROT_READ | PROT_WRITE));
- if (map.get() == NULL) {
- LOG(ERROR) << "Zip: mmap for '" << entry_filename << "' failed";
+ PROT_READ | PROT_WRITE, error_msg));
+ if (map.get() == nullptr) {
+ DCHECK(!error_msg->empty());
return NULL;
}
- bool success = ExtractToMemory(map->Begin(), map->Size());
+ bool success = ExtractToMemory(map->Begin(), map->Size(), error_msg);
if (!success) {
LOG(ERROR) << "Zip: Failed to extract '" << entry_filename << "' to memory";
return NULL;
@@ -329,27 +334,25 @@
}
}
-ZipArchive* ZipArchive::Open(const std::string& filename) {
- DCHECK(!filename.empty());
- int fd = open(filename.c_str(), O_RDONLY, 0);
+ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) {
+ DCHECK(filename != nullptr);
+ int fd = open(filename, O_RDONLY, 0);
if (fd == -1) {
- PLOG(WARNING) << "Unable to open '" << filename << "'";
+ *error_msg = StringPrintf("Zip: unable to open '%s': %s", filename, strerror(errno));
return NULL;
}
- return OpenFromFd(fd);
+ return OpenFromFd(fd, filename, error_msg);
}
-ZipArchive* ZipArchive::OpenFromFd(int fd) {
+ZipArchive* ZipArchive::OpenFromFd(int fd, const char* filename, std::string* error_msg) {
SetCloseOnExec(fd);
- UniquePtr<ZipArchive> zip_archive(new ZipArchive(fd));
- if (zip_archive.get() == NULL) {
- return NULL;
- }
- if (!zip_archive->MapCentralDirectory()) {
+ UniquePtr<ZipArchive> zip_archive(new ZipArchive(fd, filename));
+ CHECK(zip_archive.get() != nullptr);
+ if (!zip_archive->MapCentralDirectory(error_msg)) {
zip_archive->Close();
return NULL;
}
- if (!zip_archive->Parse()) {
+ if (!zip_archive->Parse(error_msg)) {
zip_archive->Close();
return NULL;
}
@@ -374,19 +377,28 @@
dir_offset_ = 0;
}
+std::string ZipArchive::ErrorStringPrintf(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ std::string result(StringPrintf("Zip '%s' : ", filename_.c_str()));
+ StringAppendV(&result, fmt, ap);
+ va_end(ap);
+ return result;
+}
+
// Find the zip Central Directory and memory-map it.
//
// On success, returns true after populating fields from the EOCD area:
// num_entries_
// dir_offset_
// dir_map_
-bool ZipArchive::MapCentralDirectory() {
+bool ZipArchive::MapCentralDirectory(std::string* error_msg) {
/*
* Get and test file length.
*/
off64_t file_length = lseek64(fd_, 0, SEEK_END);
if (file_length < kEOCDLen) {
- LOG(WARNING) << "Zip: length " << file_length << " is too small to be zip";
+ *error_msg = ErrorStringPrintf("length %lld is too small to be zip", file_length);
return false;
}
@@ -396,27 +408,26 @@
}
UniquePtr<uint8_t[]> scan_buf(new uint8_t[read_amount]);
- if (scan_buf.get() == NULL) {
- return false;
- }
+ CHECK(scan_buf.get() != nullptr);
/*
* Make sure this is a Zip archive.
*/
if (lseek64(fd_, 0, SEEK_SET) != 0) {
- PLOG(WARNING) << "seek to start failed: ";
+ *error_msg = ErrorStringPrintf("seek to start failed: %s", strerror(errno));
return false;
}
ssize_t actual = TEMP_FAILURE_RETRY(read(fd_, scan_buf.get(), sizeof(int32_t)));
if (actual != static_cast<ssize_t>(sizeof(int32_t))) {
- PLOG(INFO) << "couldn't read first signature from zip archive: ";
+ *error_msg = ErrorStringPrintf("couldn\'t read first signature from zip archive: %s",
+ strerror(errno));
return false;
}
unsigned int header = Le32ToHost(scan_buf.get());
if (header != kLFHSignature) {
- LOG(VERBOSE) << "Not a Zip archive (found " << std::hex << header << ")";
+ *error_msg = ErrorStringPrintf("not a zip archive (found 0x%x)", header);
return false;
}
@@ -433,12 +444,13 @@
off64_t search_start = file_length - read_amount;
if (lseek64(fd_, search_start, SEEK_SET) != search_start) {
- PLOG(WARNING) << "Zip: seek " << search_start << " failed";
+ *error_msg = ErrorStringPrintf("seek %lld failed: %s", search_start, strerror(errno));
return false;
}
actual = TEMP_FAILURE_RETRY(read(fd_, scan_buf.get(), read_amount));
if (actual != static_cast<ssize_t>(read_amount)) {
- PLOG(WARNING) << "Zip: read " << actual << ", expected " << read_amount << ". failed";
+ *error_msg = ErrorStringPrintf("read %lld, expected %zd. %s", search_start, read_amount,
+ strerror(errno));
return false;
}
@@ -454,14 +466,14 @@
}
}
if (i < 0) {
- LOG(WARNING) << "Zip: EOCD not found, not a zip file";
+ *error_msg = ErrorStringPrintf("EOCD not found, not a zip file");
return false;
}
off64_t eocd_offset = search_start + i;
const byte* eocd_ptr = scan_buf.get() + i;
- DCHECK(eocd_offset < file_length);
+ CHECK(eocd_offset < file_length);
// Grab the CD offset and size, and the number of entries in the
// archive. Verify that they look reasonable.
@@ -474,29 +486,28 @@
uint16_t comment_size = Le16ToHost(eocd_ptr + kEOCDCommentSize);
if ((uint64_t) dir_offset + (uint64_t) dir_size > (uint64_t) eocd_offset) {
- LOG(WARNING) << "Zip: bad offsets ("
- << "dir=" << dir_offset << ", "
- << "size=" << dir_size << ", "
- << "eocd=" << eocd_offset << ")";
+ *error_msg = ErrorStringPrintf("bad offsets (dir=%ud, size=%ud, eocd=%lld)",
+ dir_offset, dir_size, eocd_offset);
return false;
}
if (num_entries == 0) {
- LOG(WARNING) << "Zip: empty archive?";
+ *error_msg = ErrorStringPrintf("empty archive?");
return false;
} else if (num_entries != total_num_entries || disk_number != 0 || disk_with_central_dir != 0) {
- LOG(WARNING) << "spanned archives not supported";
+ *error_msg = ErrorStringPrintf("spanned archives not supported");
return false;
}
// Check to see if comment is a sane size
if ((comment_size > (file_length - kEOCDLen))
|| (eocd_offset > (file_length - kEOCDLen) - comment_size)) {
- LOG(WARNING) << "comment size runs off end of file";
+ *error_msg = ErrorStringPrintf("comment size runs off end of file");
return false;
}
// It all looks good. Create a mapping for the CD.
- dir_map_.reset(MemMap::MapFile(dir_size, PROT_READ, MAP_SHARED, fd_, dir_offset));
+ dir_map_.reset(MemMap::MapFile(dir_size, PROT_READ, MAP_SHARED, fd_, dir_offset,
+ filename_.c_str(), error_msg));
if (dir_map_.get() == NULL) {
return false;
}
@@ -506,7 +517,7 @@
return true;
}
-bool ZipArchive::Parse() {
+bool ZipArchive::Parse(std::string* error_msg) {
const byte* cd_ptr = dir_map_->Begin();
size_t cd_length = dir_map_->Size();
@@ -515,23 +526,23 @@
const byte* ptr = cd_ptr;
for (int i = 0; i < num_entries_; i++) {
if (Le32ToHost(ptr) != kCDESignature) {
- LOG(WARNING) << "Zip: missed a central dir sig (at " << i << ")";
+ *error_msg = ErrorStringPrintf("missed a central dir sig (at %d)", i);
return false;
}
if (ptr + kCDELen > cd_ptr + cd_length) {
- LOG(WARNING) << "Zip: ran off the end (at " << i << ")";
+ *error_msg = ErrorStringPrintf("ran off the end (at %d)", i);
return false;
}
int64_t local_hdr_offset = Le32ToHost(ptr + kCDELocalOffset);
if (local_hdr_offset >= dir_offset_) {
- LOG(WARNING) << "Zip: bad LFH offset " << local_hdr_offset << " at entry " << i;
+ *error_msg = ErrorStringPrintf("bad LFH offset %lld at entry %d", local_hdr_offset, i);
return false;
}
uint16_t gpbf = Le16ToHost(ptr + kCDEGPBFlags);
if ((gpbf & kGPFUnsupportedMask) != 0) {
- LOG(WARNING) << "Invalid General Purpose Bit Flag: " << gpbf;
+ *error_msg = ErrorStringPrintf("invalid general purpose bit flag %x", gpbf);
return false;
}
@@ -544,16 +555,15 @@
// Check name for NULL characters
if (memchr(name, 0, name_len) != NULL) {
- LOG(WARNING) << "Filename contains NUL byte";
+ *error_msg = ErrorStringPrintf("filename contains NUL byte");
return false;
}
dir_entries_.Put(StringPiece(name, name_len), ptr);
ptr += kCDELen + name_len + extra_len + comment_len;
if (ptr > cd_ptr + cd_length) {
- LOG(WARNING) << "Zip: bad CD advance "
- << "(" << ptr << " vs " << (cd_ptr + cd_length) << ") "
- << "at entry " << i;
+ *error_msg = ErrorStringPrintf("bad CD advance (%p vs %p) at entry %d",
+ ptr, cd_ptr + cd_length, i);
return false;
}
}
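
MapCentralDirectory locates the End Of Central Directory record by reading a tail window of the file and scanning backwards for its signature before trusting any of the offsets it contains. A simplified sketch of that backwards scan over an in-memory buffer, with the signature passed in as a parameter and the comment-length clamping of the real code omitted:

    // Returns the offset of the EOCD record within buf, or -1 if none is found.
    static ssize_t FindEocd(const uint8_t* buf, ssize_t len, uint32_t eocd_signature) {
      for (ssize_t i = len - kEOCDLen; i >= 0; --i) {
        if (Le32ToHost(buf + i) == eocd_signature) {
          return i;
        }
      }
      return -1;  // no EOCD: not a zip file
    }
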
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index d9ccba2..8ff952b 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -19,6 +19,7 @@
#include <stdint.h>
#include <zlib.h>
+#include <string>
#include "base/logging.h"
#include "base/stringpiece.h"
@@ -36,9 +37,9 @@
class ZipEntry {
public:
- bool ExtractToFile(File& file);
- bool ExtractToMemory(uint8_t* begin, size_t size);
- MemMap* ExtractToMemMap(const char* entry_filename);
+ bool ExtractToFile(File& file, std::string* error_msg);
+ bool ExtractToMemory(uint8_t* begin, size_t size, std::string* error_msg);
+ MemMap* ExtractToMemMap(const char* entry_filename, std::string* error_msg);
uint32_t GetUncompressedLength();
uint32_t GetCrc32();
@@ -109,8 +110,8 @@
static const int32_t kGPFUnsupportedMask = (kGPFEncryptedFlag);
// return new ZipArchive instance on success, NULL on error.
- static ZipArchive* Open(const std::string& filename);
- static ZipArchive* OpenFromFd(int fd);
+ static ZipArchive* Open(const char* filename, std::string* error_msg);
+ static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
ZipEntry* Find(const char* name) const;
@@ -119,11 +120,14 @@
}
private:
- explicit ZipArchive(int fd) : fd_(fd), num_entries_(0), dir_offset_(0) {}
+ explicit ZipArchive(int fd, const char* filename)
+ : fd_(fd), num_entries_(0), dir_offset_(0), filename_(filename) {}
- bool MapCentralDirectory();
- bool Parse();
+ bool MapCentralDirectory(std::string* error_msg);
+ bool Parse(std::string* error_msg);
void Close();
+ std::string ErrorStringPrintf(const char* fmt, ...)
+ __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
int fd_;
uint16_t num_entries_;
@@ -131,6 +135,8 @@
UniquePtr<MemMap> dir_map_;
typedef SafeMap<StringPiece, const byte*> DirEntries;
DirEntries dir_entries_;
+ // Containing file for error reporting.
+ const std::string filename_;
friend class ZipEntry;
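
With Open, OpenFromFd and the ZipEntry extractors all taking a std::string* error_msg, callers follow one uniform pattern: a null return paired with a populated error_msg instead of scattered LOG statements at the point of failure. A minimal sketch of that flow (the jar path is illustrative):

    std::string error_msg;
    UniquePtr<ZipArchive> zip(ZipArchive::Open("/illustrative/path/app.jar", &error_msg));
    if (zip.get() == nullptr) {
      LOG(WARNING) << error_msg;  // error_msg describes the failing zip operation
    } else {
      UniquePtr<ZipEntry> entry(zip->Find("classes.dex"));
      if (entry.get() != nullptr) {
        UniquePtr<MemMap> map(entry->ExtractToMemMap("classes.dex", &error_msg));
        if (map.get() == nullptr) {
          LOG(WARNING) << error_msg;
        }
      }
    }
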
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 9bdc24b..622dc89 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -29,8 +29,10 @@
class ZipArchiveTest : public CommonTest {};
TEST_F(ZipArchiveTest, FindAndExtract) {
- UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileName()));
- ASSERT_TRUE(zip_archive.get() != false);
+ std::string error_msg;
+ UniquePtr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileName().c_str(), &error_msg));
+ ASSERT_TRUE(zip_archive.get() != nullptr) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
UniquePtr<ZipEntry> zip_entry(zip_archive->Find("classes.dex"));
ASSERT_TRUE(zip_entry.get() != false);
@@ -38,8 +40,9 @@
ASSERT_NE(-1, tmp.GetFd());
UniquePtr<File> file(new File(tmp.GetFd(), tmp.GetFilename()));
ASSERT_TRUE(file.get() != NULL);
- bool success = zip_entry->ExtractToFile(*file);
- ASSERT_TRUE(success);
+ bool success = zip_entry->ExtractToFile(*file, &error_msg);
+ ASSERT_TRUE(success) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
file.reset(NULL);
uint32_t computed_crc = crc32(0L, Z_NULL, 0);