Merge "Add internal compiler option to control defaulting of boot.art" into dalvik-dev
diff --git a/src/assembler_arm.cc b/src/assembler_arm.cc
index e1976d7..ac39f2e 100644
--- a/src/assembler_arm.cc
+++ b/src/assembler_arm.cc
@@ -1091,7 +1091,7 @@
int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
// The offset is off by 8 due to the way the ARM CPUs read PC.
offset -= 8;
- CHECK(IsAligned(offset, 4));
+ CHECK_ALIGNED(offset, 4);
CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset));
// Properly preserve only the bits supported in the instruction.
@@ -1423,7 +1423,7 @@
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs) {
- CHECK(IsAligned(frame_size, kStackAlignment));
+ CHECK_ALIGNED(frame_size, kStackAlignment);
CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
// Push callee saves and link register
@@ -1447,7 +1447,7 @@
void ArmAssembler::RemoveFrame(size_t frame_size,
const std::vector<ManagedRegister>& callee_save_regs) {
- CHECK(IsAligned(frame_size, kStackAlignment));
+ CHECK_ALIGNED(frame_size, kStackAlignment);
// Compute callee saves to pop and PC
RegList pop_list = 1 << PC;
size_t pop_values = 1;
diff --git a/src/assembler_arm.h b/src/assembler_arm.h
index e1d43e4..bd5ed0b 100644
--- a/src/assembler_arm.h
+++ b/src/assembler_arm.h
@@ -179,7 +179,7 @@
const uint32_t offset_mask = (1 << 12) - 1;
uint32_t offset = encoding_ & offset_mask;
CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020.
- CHECK(IsAligned(offset, 2)); // Multiple of 4.
+ CHECK_ALIGNED(offset, 2); // Multiple of 4.
int mode = encoding_ & ((8|4|1) << 21);
CHECK((mode == Offset) || (mode == NegOffset));
uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
diff --git a/src/assembler_x86.cc b/src/assembler_x86.cc
index c7b2bb5..f3b3c11 100644
--- a/src/assembler_x86.cc
+++ b/src/assembler_x86.cc
@@ -1379,7 +1379,7 @@
void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs) {
- CHECK(IsAligned(frame_size, kStackAlignment));
+ CHECK_ALIGNED(frame_size, kStackAlignment);
CHECK_EQ(0u, spill_regs.size()); // no spilled regs on x86
// return address then method on stack
addl(ESP, Immediate(-frame_size + kPointerSize /*method*/ +
@@ -1389,19 +1389,19 @@
void X86Assembler::RemoveFrame(size_t frame_size,
const std::vector<ManagedRegister>& spill_regs) {
- CHECK(IsAligned(frame_size, kStackAlignment));
+ CHECK_ALIGNED(frame_size, kStackAlignment);
CHECK_EQ(0u, spill_regs.size()); // no spilled regs on x86
addl(ESP, Immediate(frame_size - kPointerSize));
ret();
}

void X86Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK(IsAligned(adjust, kStackAlignment));
+ CHECK_ALIGNED(adjust, kStackAlignment);
addl(ESP, Immediate(-adjust));
}

void X86Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK(IsAligned(adjust, kStackAlignment));
+ CHECK_ALIGNED(adjust, kStackAlignment);
addl(ESP, Immediate(adjust));
}
diff --git a/src/class_linker.cc b/src/class_linker.cc
index bd23b2d..eda60ca 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -2387,7 +2387,7 @@
// Now we want to pack all of the double-wide fields together. If
// we're not aligned, though, we want to shuffle one 32-bit field
// into place. If we can't find one, we'll have to pad it.
- if (current_field != num_fields && !IsAligned(field_offset.Uint32Value(), 8)) {
+ if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) {
for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) {
Field* field = grouped_and_sorted_fields[i];
const Class* type = field->GetTypeDuringLinking();
@@ -2408,7 +2408,7 @@
// Alignment is good, shuffle any double-wide fields forward, and
// finish assigning field offsets to all fields.
- DCHECK(current_field == num_fields || IsAligned(field_offset.Uint32Value(), 8));
+ DCHECK(current_field == num_fields || IsAligned<8>(field_offset.Uint32Value()));
while (!grouped_and_sorted_fields.empty()) {
Field* field = grouped_and_sorted_fields.front();
grouped_and_sorted_fields.pop_front();
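
For reference, the packing step these comments describe works roughly like this (a minimal sketch with hypothetical names, not the actual ClassLinker code): when the running field offset is not 8-byte aligned, one 32-bit field is pulled forward to fill the 4-byte hole, and only if none is available is the gap padded.

    #include <cstdint>
    #include <deque>

    struct FieldSlot { bool is_64bit; };  // hypothetical stand-in for art::Field

    // Illustrative only: advance field_offset so the double-wide fields can
    // start 8-byte aligned, preferring to fill the hole with a 32-bit field.
    uint32_t AlignForWideFields(uint32_t field_offset, std::deque<FieldSlot>& remaining) {
      if (field_offset % 8 != 0) {
        for (auto it = remaining.begin(); it != remaining.end(); ++it) {
          if (!it->is_64bit) {
            remaining.erase(it);  // this 32-bit field takes the current offset
            field_offset += 4;
            break;
          }
        }
        if (field_offset % 8 != 0) {
          field_offset += 4;  // no 32-bit field found: pad
        }
      }
      return field_offset;  // IsAligned<8>(field_offset) now holds
    }
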
diff --git a/src/heap.cc b/src/heap.cc
index 9bed951..d7c9584 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -171,7 +171,7 @@
bool Heap::IsHeapAddress(const Object* obj) {
// Note: we deliberately don't take the lock here, and mustn't test anything that would
// require taking the lock.
- if (!IsAligned(obj, kObjectAlignment)) {
+ if (!IsAligned<kObjectAlignment>(obj)) {
return false;
}
// TODO
@@ -191,7 +191,7 @@
void Heap::VerifyObjectLocked(const Object* obj) {
lock_->AssertHeld();
if (obj != NULL) {
- if (!IsAligned(obj, kObjectAlignment)) {
+ if (!IsAligned<kObjectAlignment>(obj)) {
LOG(FATAL) << "Object isn't aligned: " << obj;
} else if (!live_bitmap_->Test(obj)) {
// TODO: we don't hold a lock here as it is assumed the live bit map
@@ -205,7 +205,7 @@
const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
if (c == NULL) {
LOG(FATAL) << "Null class" << " in object: " << obj;
- } else if (!IsAligned(c, kObjectAlignment)) {
+ } else if (!IsAligned<kObjectAlignment>(c)) {
LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
} else if (!live_bitmap_->Test(c)) {
LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
@@ -291,7 +291,7 @@
CHECK(live_bitmap_ != NULL);
byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
while (current < space->GetLimit()) {
- DCHECK(IsAligned(current, kObjectAlignment));
+ DCHECK_ALIGNED(current, kObjectAlignment);
const Object* obj = reinterpret_cast<const Object*>(current);
live_bitmap_->Set(obj);
current += RoundUp(obj->SizeOf(), kObjectAlignment);
diff --git a/src/indirect_reference_table.cc b/src/indirect_reference_table.cc
index 4283b83..716c214 100644
--- a/src/indirect_reference_table.cc
+++ b/src/indirect_reference_table.cc
@@ -86,7 +86,7 @@
DCHECK(obj != NULL);
// TODO: stronger sanity check on the object (such as in heap)
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(obj), 8)) << reinterpret_cast<const void*>(obj);
+ DCHECK_ALIGNED(reinterpret_cast<intptr_t>(obj), 8);
DCHECK(table_ != NULL);
DCHECK_LE(alloc_entries_, max_entries_);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
diff --git a/src/oat.cc b/src/oat.cc
index 8bf8299..14040f3 100644
--- a/src/oat.cc
+++ b/src/oat.cc
@@ -51,13 +51,13 @@
uint32_t OatHeader::GetExecutableOffset() const {
DCHECK(IsValid());
- DCHECK(IsAligned(executable_offset_, kPageSize));
+ DCHECK_ALIGNED(executable_offset_, kPageSize);
CHECK_GT(executable_offset_, sizeof(OatHeader));
return executable_offset_;
}

void OatHeader::SetExecutableOffset(uint32_t executable_offset) {
- DCHECK(IsAligned(executable_offset, kPageSize));
+ DCHECK_ALIGNED(executable_offset, kPageSize);
CHECK_GT(executable_offset, sizeof(OatHeader));
DCHECK(IsValid());
DCHECK_EQ(executable_offset_, 0U);
diff --git a/src/oat_writer.cc b/src/oat_writer.cc
index 99c9361..931dbc1 100644
--- a/src/oat_writer.cc
+++ b/src/oat_writer.cc
@@ -180,7 +180,7 @@
if (compiled_method != NULL) {
offset = compiled_method->AlignCode(offset);
- DCHECK(IsAligned(offset, kArmAlignment)) << std::hex << offset;
+ DCHECK_ALIGNED(offset, kArmAlignment);
const std::vector<uint8_t>& code = compiled_method->GetCode();
size_t code_size = code.size() * sizeof(code[0]);
uint32_t thumb_offset = compiled_method->CodeDelta();
@@ -224,7 +224,7 @@
const CompiledInvokeStub* compiled_invoke_stub = compiler_->GetCompiledInvokeStub(method);
if (compiled_invoke_stub != NULL) {
offset = CompiledMethod::AlignCode(offset, compiler_->GetInstructionSet());
- DCHECK(IsAligned(offset, kArmAlignment)) << std::hex << offset;
+ DCHECK_ALIGNED(offset, kArmAlignment);
const std::vector<uint8_t>& invoke_stub = compiled_invoke_stub->GetCode();
size_t invoke_stub_size = invoke_stub.size() * sizeof(invoke_stub[0]);
invoke_stub_offset = (invoke_stub_size == 0) ? 0 : offset;
@@ -403,7 +403,7 @@
code_offset += aligned_code_delta;
DCHECK_CODE_OFFSET();
}
- DCHECK(IsAligned(code_offset, kArmAlignment)) << std::hex << code_offset;
+ DCHECK_ALIGNED(code_offset, kArmAlignment);
const std::vector<uint8_t>& code = compiled_method->GetCode();
size_t code_size = code.size() * sizeof(code[0]);
DCHECK((code_size == 0 && method->GetOatCodeOffset() == 0)
@@ -480,7 +480,7 @@
code_offset += aligned_code_delta;
DCHECK_CODE_OFFSET();
}
- DCHECK(IsAligned(code_offset, kArmAlignment)) << std::hex << code_offset;
+ DCHECK_ALIGNED(code_offset, kArmAlignment);
const std::vector<uint8_t>& invoke_stub = compiled_invoke_stub->GetCode();
size_t invoke_stub_size = invoke_stub.size() * sizeof(invoke_stub[0]);
DCHECK((invoke_stub_size == 0 && method->GetOatInvokeStubOffset() == 0)
diff --git a/src/object.h b/src/object.h
index 8f6f75d..a93733c 100644
--- a/src/object.h
+++ b/src/object.h
@@ -2173,43 +2173,36 @@
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
Class* type = GetTypeDuringLinking();
if (type != NULL && (type->IsPrimitiveDouble() || type->IsPrimitiveLong())) {
- DCHECK(IsAligned(num_bytes.Uint32Value(), 8));
+ DCHECK_ALIGNED(num_bytes.Uint32Value(), 8);
}
- SetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_),
- num_bytes.Uint32Value(), false);
+ SetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_), num_bytes.Uint32Value(), false);
}

inline Class* Field::GetDeclaringClass() const {
- Class* result = GetFieldObject<Class*>(
- OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), false);
+ Class* result = GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), false);
DCHECK(result != NULL);
DCHECK(result->IsLoaded() || result->IsErroneous());
return result;
}

inline void Field::SetDeclaringClass(Class *new_declaring_class) {
- SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_),
- new_declaring_class, false);
+ SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), new_declaring_class, false);
}

inline Class* Method::GetDeclaringClass() const {
- Class* result =
- GetFieldObject<Class*>(
- OFFSET_OF_OBJECT_MEMBER(Method, declaring_class_), false);
+ Class* result = GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Method, declaring_class_), false);
DCHECK(result != NULL);
DCHECK(result->IsIdxLoaded() || result->IsErroneous());
return result;
}

inline void Method::SetDeclaringClass(Class *new_declaring_class) {
- SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Method, declaring_class_),
- new_declaring_class, false);
+ SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Method, declaring_class_), new_declaring_class, false);
}

inline uint32_t Method::GetReturnTypeIdx() const {
DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Method, java_return_type_idx_),
- false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Method, java_return_type_idx_), false);
}

inline bool Method::IsReturnAReference() const {
diff --git a/src/thread.cc b/src/thread.cc
index e143af5..79dbf81 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -1245,6 +1245,20 @@
return result;
}

+const Method* Thread::GetCurrentMethod() const {
+ Method* m = top_of_managed_stack_.GetMethod();
+ // We use JNI internally for exception throwing, so it's possible to arrive
+ // here via a "FromCode" function, in which case there's a synthetic
+ // callee-save method at the top of the stack. These shouldn't be user-visible,
+ // so if we find one, skip it and return the compiled method underneath.
+ if (m->IsCalleeSaveMethod()) {
+ Frame f = top_of_managed_stack_;
+ f.Next();
+ m = f.GetMethod();
+ }
+ return m;
+}
+
bool Thread::HoldsLock(Object* object) {
if (object == NULL) {
return false;
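
As a usage illustration only (a hedged sketch, not part of this change; the real call sites live in the JNI and logging code), the point of skipping the synthetic callee-save frame is that diagnostics name the managed method the user actually wrote:

    // Hypothetical helper; assumes art::Thread, art::Method, the LOG macro,
    // and PrettyMethod() from the surrounding sources.
    static void WarnInCurrentMethod(Thread* self, const char* what) {
      const Method* m = self->GetCurrentMethod();
      // Without the skip above, m could be the synthetic callee-save method
      // pushed by a "FromCode" helper rather than the user's compiled method.
      LOG(WARNING) << what << " in " << PrettyMethod(m);
    }
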
diff --git a/src/thread.h b/src/thread.h
index 8fd0985..74e6f83 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -233,9 +233,7 @@
// Returns the Method* for the current method.
// This is used by the JNI implementation for logging and diagnostic purposes.
- const Method* GetCurrentMethod() const {
- return top_of_managed_stack_.GetMethod();
- }
+ const Method* GetCurrentMethod() const;

bool IsExceptionPending() const {
return exception_ != NULL;
diff --git a/src/utils.h b/src/utils.h
index 35b350b..dd597e9 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -25,17 +25,23 @@
return (x & (x - 1)) == 0;
}

-template<typename T>
-static inline bool IsAligned(T x, int n) {
- CHECK(IsPowerOfTwo(n));
+template<int n, typename T>
+static inline bool IsAligned(T x) {
+ COMPILE_ASSERT((n & (n - 1)) == 0, n_not_power_of_two);
return (x & (n - 1)) == 0;
}

-template<typename T>
-static inline bool IsAligned(T* x, int n) {
- return IsAligned(reinterpret_cast<uintptr_t>(x), n);
+template<int n, typename T>
+static inline bool IsAligned(T* x) {
+ return IsAligned<n>(reinterpret_cast<uintptr_t>(x));
}

+#define CHECK_ALIGNED(value, alignment) \
+ CHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<void*>(value)
+
+#define DCHECK_ALIGNED(value, alignment) \
+ DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<void*>(value)
+
// Check whether an N-bit two's-complement representation can hold value.
static inline bool IsInt(int N, word value) {
CHECK_LT(0, N);
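
A minimal usage sketch of the reworked alignment helpers (assuming the art namespace and the CHECK/DCHECK logging macros used throughout the tree): the alignment is now a compile-time template argument, so a non-power-of-two alignment fails at compile time via COMPILE_ASSERT instead of tripping a runtime CHECK, and the *_ALIGNED macros append the offending value to the failure message.

    #include <cstddef>
    #include <cstdint>
    // Assumes "utils.h" above (IsAligned, CHECK_ALIGNED, DCHECK_ALIGNED) and
    // its logging dependencies are on the include path.

    void CheckLayout(size_t frame_size, const void* obj, uint32_t code_offset) {
      CHECK_ALIGNED(frame_size, 16);   // alignment must be a compile-time power of two
      DCHECK_ALIGNED(code_offset, 4);  // debug-only variant
      if (!IsAligned<8>(obj)) {        // pointer overload; 8 as in indirect_reference_table.cc
        // handle the misaligned object
      }
      // IsAligned<6>(code_offset);    // would not compile: 6 is not a power of two
    }
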
diff --git a/test/003-omnibus-opcodes/build b/test/003-omnibus-opcodes/build
index 6ee341b..47abae2 100644
--- a/test/003-omnibus-opcodes/build
+++ b/test/003-omnibus-opcodes/build
@@ -23,10 +23,10 @@
${JAVAC} -d classes `find src2 -name '*.java'`
dx -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
-zip ${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar classes.dex
+zip ${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar classes.dex
dex2oatd -Xms16m -Xmx16m \
- --boot-image=${ANDROID_PRODUCT_OUT}/system/framework/core.art \
- --dex-file=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar \
- --oat=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.oat \
+ --boot-image=${ANDROID_PRODUCT_OUT}/data/art-test/core.art \
+ --dex-file=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar \
+ --oat=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.oat \
--host-prefix=${ANDROID_PRODUCT_OUT}
diff --git a/test/023-many-interfaces/build b/test/023-many-interfaces/build
index 88201a2..171eaae 100644
--- a/test/023-many-interfaces/build
+++ b/test/023-many-interfaces/build
@@ -25,10 +25,10 @@
${JAVAC} -d classes src/*.java
dx --debug --dex --dump-to=classes.lst --output=classes.dex classes
-zip ${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar classes.dex
+zip ${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar classes.dex
dex2oatd -Xms16m -Xmx16m \
- --boot-image=${ANDROID_PRODUCT_OUT}/system/framework/core.art \
- --dex-file=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar \
- --oat=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.oat \
+ --boot-image=${ANDROID_PRODUCT_OUT}/data/art-test/core.art \
+ --dex-file=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar \
+ --oat=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.oat \
--host-prefix=${ANDROID_PRODUCT_OUT}
diff --git a/test/085-old-style-inner-class/build b/test/085-old-style-inner-class/build
index a08c98b..32f7a86 100644
--- a/test/085-old-style-inner-class/build
+++ b/test/085-old-style-inner-class/build
@@ -26,10 +26,10 @@
dx --debug --dex --dump-to=classes.lst --output=classes.dex \
--dump-width=1000 classes 2>/dev/null
-zip ${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar classes.dex
+zip ${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar classes.dex
dex2oatd -Xms16m -Xmx16m \
- --boot-image=${ANDROID_PRODUCT_OUT}/system/framework/core.art \
- --dex-file=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.jar \
- --oat=${ANDROID_PRODUCT_OUT}/system/framework/$TEST_NAME.oat \
+ --boot-image=${ANDROID_PRODUCT_OUT}/data/art-test/core.art \
+ --dex-file=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.jar \
+ --oat=${ANDROID_PRODUCT_OUT}/data/art-test/$TEST_NAME.oat \
--host-prefix=${ANDROID_PRODUCT_OUT}