Refactor the compilers out of libart.
This builds three separate compilers and dynamically links with the right one
at runtime.
Change-Id: I59d22b9884f41de733c09f97e29ee290236d5f4b
diff --git a/Android.mk b/Android.mk
index 02080cc..168b619 100644
--- a/Android.mk
+++ b/Android.mk
@@ -27,6 +27,7 @@
include $(build_path)/Android.common.mk
include $(build_path)/Android.libart.mk
+include $(build_path)/Android.libart-compiler.mk
include $(build_path)/Android.executable.mk
include $(build_path)/Android.oat.mk
diff --git a/build/Android.common.mk b/build/Android.common.mk
index e84df3e..0ab796a 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -14,6 +14,7 @@
# limitations under the License.
#
+# TODO: move the LLVM compiler out into a separate .so too...
# Use llvm as the backend
ifneq ($(wildcard art/USE_LLVM_COMPILER),)
ART_USE_LLVM_COMPILER := true
@@ -21,20 +22,6 @@
ART_USE_LLVM_COMPILER := false
endif
-# Build for MIPS target (temporary)
-ifneq ($(wildcard art/MIPS_TARGET),)
-ART_MIPS_TARGET := true
-else
-ART_MIPS_TARGET := false
-endif
-
-# Build for x86 target (temporary)
-ifneq ($(wildcard art/X86_TARGET),)
-ART_X86_TARGET := true
-else
-ART_X86_TARGET := false
-endif
-
ifeq ($(ART_USE_LLVM_COMPILER),true)
LLVM_ROOT_PATH := external/llvm
include $(LLVM_ROOT_PATH)/llvm.mk
@@ -73,11 +60,6 @@
art_cflags += -DART_USE_LLVM_COMPILER=1
endif
-# (temp) for testing
-ifeq ($(ART_MIPS_TARGET),true)
-art_cflags += -D__mips_hard_float
-endif
-
ifeq ($(HOST_OS),linux)
art_non_debug_cflags := \
-Wframe-larger-than=1728
@@ -237,38 +219,11 @@
src/compiler_llvm/upcall_compiler.cc \
src/compiler_llvm/utils_llvm.cc
else
+# TODO: should these be in libart-compiler.so instead?
LIBART_COMMON_SRC_FILES += \
- src/compiler/Dataflow.cc \
- src/compiler/Frontend.cc \
- src/compiler/IntermediateRep.cc \
- src/compiler/Ralloc.cc \
- src/compiler/SSATransformation.cc \
- src/compiler/Utility.cc \
- src/compiler/codegen/RallocUtil.cc \
src/jni_compiler.cc \
src/jni_internal_arm.cc \
src/jni_internal_x86.cc
-ifeq ($(ART_MIPS_TARGET),true)
-LIBART_COMMON_SRC_FILES += \
- src/compiler/codegen/mips/ArchUtility.cc \
- src/compiler/codegen/mips/MipsRallocUtil.cc \
- src/compiler/codegen/mips/Assemble.cc \
- src/compiler/codegen/mips/mips/Codegen.cc
-else
-ifeq ($(ART_X86_TARGET),true)
-LIBART_COMMON_SRC_FILES += \
- src/compiler/codegen/x86/ArchUtility.cc \
- src/compiler/codegen/x86/X86RallocUtil.cc \
- src/compiler/codegen/x86/Assemble.cc \
- src/compiler/codegen/x86/x86/Codegen.cc
-else
-LIBART_COMMON_SRC_FILES += \
- src/compiler/codegen/arm/ArchUtility.cc \
- src/compiler/codegen/arm/ArmRallocUtil.cc \
- src/compiler/codegen/arm/Assemble.cc \
- src/compiler/codegen/arm/armv7-a/Codegen.cc
-endif
-endif
endif
LIBART_TARGET_SRC_FILES := \
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
new file mode 100644
index 0000000..a9108e07
--- /dev/null
+++ b/build/Android.libart-compiler.mk
@@ -0,0 +1,141 @@
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LIBART_COMPILER_COMMON_SRC_FILES += \
+ src/compiler/Dataflow.cc \
+ src/compiler/Frontend.cc \
+ src/compiler/IntermediateRep.cc \
+ src/compiler/Ralloc.cc \
+ src/compiler/SSATransformation.cc \
+ src/compiler/Utility.cc \
+ src/compiler/codegen/RallocUtil.cc
+
+LIBART_COMPILER_ARM_SRC_FILES += \
+ $(LIBART_COMPILER_COMMON_SRC_FILES) \
+ src/compiler/codegen/arm/ArchUtility.cc \
+ src/compiler/codegen/arm/ArmRallocUtil.cc \
+ src/compiler/codegen/arm/Assemble.cc \
+ src/compiler/codegen/arm/armv7-a/Codegen.cc
+
+LIBART_COMPILER_MIPS_SRC_FILES += \
+ $(LIBART_COMPILER_COMMON_SRC_FILES) \
+ src/compiler/codegen/mips/ArchUtility.cc \
+ src/compiler/codegen/mips/MipsRallocUtil.cc \
+ src/compiler/codegen/mips/Assemble.cc \
+ src/compiler/codegen/mips/mips/Codegen.cc
+
+LIBART_COMPILER_X86_SRC_FILES += \
+ $(LIBART_COMPILER_COMMON_SRC_FILES) \
+ src/compiler/codegen/x86/ArchUtility.cc \
+ src/compiler/codegen/x86/X86RallocUtil.cc \
+ src/compiler/codegen/x86/Assemble.cc \
+ src/compiler/codegen/x86/x86/Codegen.cc
+
+# $(1): target or host
+# $(2): ndebug or debug
+# $(3): architecture name
+# $(4): list of source files
+define build-libart-compiler
+ ifneq ($(1),target)
+ ifneq ($(1),host)
+ $$(error expected target or host for argument 1, received $(1))
+ endif
+ endif
+ ifneq ($(2),ndebug)
+ ifneq ($(2),debug)
+ $$(error expected ndebug or debug for argument 2, received $(2))
+ endif
+ endif
+
+ art_target_or_host := $(1)
+ art_ndebug_or_debug := $(2)
+ libart_compiler_arch := $(3)
+ libart_compiler_src_files := $(4)
+
+ include $(CLEAR_VARS)
+ ifeq ($$(art_target_or_host),target)
+ include external/stlport/libstlport.mk
+ endif
+ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+ ifeq ($$(art_ndebug_or_debug),ndebug)
+ LOCAL_MODULE := libart-compiler-$$(libart_compiler_arch)
+ else # debug
+ LOCAL_MODULE := libartd-compiler-$$(libart_compiler_arch)
+ endif
+
+ LOCAL_MODULE_TAGS := optional
+ LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+ LOCAL_SRC_FILES := $$(libart_compiler_src_files)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS := $(ART_TARGET_CFLAGS)
+ else # host
+ LOCAL_CFLAGS := $(ART_HOST_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES := liblog
+ ifeq ($$(art_ndebug_or_debug),debug)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libartd
+ else
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
+ else # host
+ LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+ endif
+ LOCAL_SHARED_LIBRARIES += libart
+ endif
+
+ # TODO: temporary hack for testing.
+ ifeq ($$(libart_compiler_arch),MIPS)
+ LOCAL_CFLAGS += -D__mips_hard_float
+ endif
+
+ LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_SHARED_LIBRARIES += libstlport
+ else # host
+ LOCAL_LDLIBS := -ldl -lpthread
+ endif
+ ifeq ($$(art_target_or_host),target)
+ include $(BUILD_SHARED_LIBRARY)
+ else # host
+ LOCAL_IS_HOST_MODULE := true
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
+endef
+
+# $(1): target or host
+# $(2): ndebug or debug
+define build-libart-compilers
+ $(foreach arch,ARM MIPS X86,$(eval $(call build-libart-compiler,$(1),$(2),$(arch),$(LIBART_COMPILER_$(arch)_SRC_FILES))))
+endef
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+ $(eval $(call build-libart-compilers,target,ndebug))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ $(eval $(call build-libart-compilers,target,debug))
+endif
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
+ $(eval $(call build-libart-compilers,host,ndebug))
+endif
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
+ $(eval $(call build-libart-compilers,host,debug))
+endif
diff --git a/src/card_table.cc b/src/card_table.cc
index 386b27c..dc61c0a 100644
--- a/src/card_table.cc
+++ b/src/card_table.cc
@@ -21,6 +21,7 @@
#include "heap.h"
#include "heap_bitmap.h"
#include "logging.h"
+#include "runtime.h"
#include "utils.h"
namespace art {
@@ -96,6 +97,7 @@
}
void CardTable::Scan(byte* heap_begin, byte* heap_end, Callback* visitor, void* arg) const {
+ Heap* heap = Runtime::Current()->GetHeap();
byte* card_cur = CardFromAddr(heap_begin);
byte* card_end = CardFromAddr(heap_end);
while (card_cur < card_end) {
@@ -110,7 +112,7 @@
}
if (run > 0) {
byte* run_end = &card_cur[run];
- Heap::GetLiveBits()->VisitRange(reinterpret_cast<uintptr_t>(AddrFromCard(run_start)),
+ heap->GetLiveBits()->VisitRange(reinterpret_cast<uintptr_t>(AddrFromCard(run_start)),
reinterpret_cast<uintptr_t>(AddrFromCard(run_end)),
visitor, arg);
}
diff --git a/src/check_jni.cc b/src/check_jni.cc
index 6dc47f7..b8a492d 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -23,6 +23,7 @@
#include "logging.h"
#include "object_utils.h"
#include "scoped_jni_thread_state.h"
+#include "space.h"
#include "thread.h"
#include "runtime.h"
@@ -196,7 +197,7 @@
* obj will be NULL. Otherwise, obj should always be non-NULL
* and valid.
*/
- if (obj != NULL && !Heap::IsHeapAddress(obj)) {
+ if (obj != NULL && !Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
LOG(ERROR) << "JNI ERROR: field operation on invalid " << GetIndirectRefKind(java_object) << ": " << java_object;
JniAbort();
return;
@@ -234,7 +235,7 @@
ScopedJniThreadState ts(env_);
Object* o = Decode<Object*>(ts, java_object);
- if (o == NULL || !Heap::IsHeapAddress(o)) {
+ if (o == NULL || !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
LOG(ERROR) << "JNI ERROR: field operation on invalid " << GetIndirectRefKind(java_object) << ": " << java_object;
JniAbort();
return;
@@ -449,7 +450,7 @@
Class* c = reinterpret_cast<Class*>(Thread::Current()->DecodeJObject(jc));
if (c == NULL) {
msg += "NULL";
- } else if (c == kInvalidIndirectRefObject || !Heap::IsHeapAddress(c)) {
+ } else if (c == kInvalidIndirectRefObject || !Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
StringAppendF(&msg, "INVALID POINTER:%p", jc);
} else if (!c->IsClass()) {
msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
@@ -617,8 +618,9 @@
ScopedJniThreadState ts(env_);
Object* obj = Decode<Object*>(ts, java_object);
- if (!Heap::IsHeapAddress(obj)) {
- LOG(ERROR) << "JNI ERROR: " << what << " is an invalid " << GetIndirectRefKind(java_object) << ": " << java_object;
+ if (!Runtime::Current()->GetHeap()->IsHeapAddress(obj)) {
+ LOG(ERROR) << "JNI ERROR: " << what << " is an invalid " << GetIndirectRefKind(java_object) << ": "
+ << java_object << " (" << obj << ")";
JniAbort();
return false;
}
@@ -676,8 +678,8 @@
ScopedJniThreadState ts(env_);
Array* a = Decode<Array*>(ts, java_array);
- if (!Heap::IsHeapAddress(a)) {
- LOG(ERROR) << "JNI ERROR: jarray is an invalid " << GetIndirectRefKind(java_array) << ": " << reinterpret_cast<void*>(java_array);
+ if (!Runtime::Current()->GetHeap()->IsHeapAddress(a)) {
+ LOG(ERROR) << "JNI ERROR: jarray is an invalid " << GetIndirectRefKind(java_array) << ": " << reinterpret_cast<void*>(java_array) << " (" << a << ")";
JniAbort();
} else if (!a->IsArrayInstance()) {
LOG(ERROR) << "JNI ERROR: jarray argument has non-array type: " << PrettyTypeOf(a);
@@ -699,7 +701,7 @@
return NULL;
}
Field* f = DecodeField(fid);
- if (!Heap::IsHeapAddress(f)) {
+ if (!Runtime::Current()->GetHeap()->IsHeapAddress(f)) {
LOG(ERROR) << "JNI ERROR: invalid jfieldID: " << fid;
JniAbort();
return NULL;
@@ -714,7 +716,7 @@
return NULL;
}
Method* m = DecodeMethod(mid);
- if (!Heap::IsHeapAddress(m)) {
+ if (!Runtime::Current()->GetHeap()->IsHeapAddress(m)) {
LOG(ERROR) << "JNI ERROR: invalid jmethodID: " << mid;
JniAbort();
return NULL;
@@ -736,7 +738,7 @@
ScopedJniThreadState ts(env_);
Object* o = Decode<Object*>(ts, java_object);
- if (o != NULL && !Heap::IsHeapAddress(o)) {
+ if (o != NULL && !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
// TODO: when we remove work_around_app_jni_bugs, this should be impossible.
LOG(ERROR) << "JNI ERROR: native code passing in reference to invalid " << GetIndirectRefKind(java_object) << ": " << java_object;
JniAbort();
diff --git a/src/class_linker.cc b/src/class_linker.cc
index dd7f099..06ae301 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -241,7 +241,8 @@
CHECK(!init_done_);
// java_lang_Class comes first, it's needed for AllocClass
- SirtRef<Class> java_lang_Class(down_cast<Class*>(Heap::AllocObject(NULL, sizeof(ClassClass))));
+ Heap* heap = Runtime::Current()->GetHeap();
+ SirtRef<Class> java_lang_Class(down_cast<Class*>(heap->AllocObject(NULL, sizeof(ClassClass))));
CHECK(java_lang_Class.get() != NULL);
java_lang_Class->SetClass(java_lang_Class.get());
java_lang_Class->SetClassSize(sizeof(ClassClass));
@@ -484,7 +485,8 @@
Class* java_lang_ref_ReferenceQueue = FindSystemClass("Ljava/lang/ref/ReferenceQueue;");
Class* java_lang_ref_FinalizerReference = FindSystemClass("Ljava/lang/ref/FinalizerReference;");
- Heap::SetWellKnownClasses(java_lang_ref_FinalizerReference, java_lang_ref_ReferenceQueue);
+ Heap* heap = Runtime::Current()->GetHeap();
+ heap->SetWellKnownClasses(java_lang_ref_FinalizerReference, java_lang_ref_ReferenceQueue);
const DexFile& java_lang_dex = FindDexFile(java_lang_ref_Reference->GetDexCache());
@@ -518,7 +520,7 @@
CHECK_EQ(java_lang_dex.GetFieldId(zombie->GetDexFieldIndex()).type_idx_,
GetClassRoot(kJavaLangObject)->GetDexTypeIndex());
- Heap::SetReferenceOffsets(referent->GetOffset(),
+ heap->SetReferenceOffsets(referent->GetOffset(),
queue->GetOffset(),
queueNext->GetOffset(),
pendingNext->GetOffset(),
@@ -566,8 +568,9 @@
const char* class_path = Runtime::Current()->GetClassPathString().c_str();
+ Heap* heap = Runtime::Current()->GetHeap();
std::string boot_image_option_string("--boot-image=");
- boot_image_option_string += Heap::GetSpaces()[0]->AsImageSpace()->GetImageFilename();
+ boot_image_option_string += heap->GetSpaces()[0]->AsImageSpace()->GetImageFilename();
const char* boot_image_option = boot_image_option_string.c_str();
std::string dex_file_option_string("--dex-file=");
@@ -871,8 +874,9 @@
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
- const std::vector<Space*>& spaces = Heap::GetSpaces();
- for (size_t i = 0; i < spaces.size(); i++) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<Space*>& spaces = heap->GetSpaces();
+ for (size_t i = 0; i < spaces.size(); ++i) {
if (spaces[i]->IsImageSpace()) {
ImageSpace* space = spaces[i]->AsImageSpace();
OatFile* oat_file = OpenOat(space);
@@ -907,7 +911,7 @@
}
}
- HeapBitmap* heap_bitmap = Heap::GetLiveBits();
+ HeapBitmap* heap_bitmap = heap->GetLiveBits();
DCHECK(heap_bitmap != NULL);
// reinit clases_ table
@@ -1065,7 +1069,8 @@
Class* ClassLinker::AllocClass(Class* java_lang_Class, size_t class_size) {
DCHECK_GE(class_size, sizeof(Class));
- SirtRef<Class> klass(Heap::AllocObject(java_lang_Class, class_size)->AsClass());
+ Heap* heap = Runtime::Current()->GetHeap();
+ SirtRef<Class> klass(heap->AllocObject(java_lang_Class, class_size)->AsClass());
klass->SetPrimitiveType(Primitive::kPrimNot); // default to not being primitive
klass->SetClassSize(class_size);
return klass.get();
@@ -1147,7 +1152,7 @@
return DefineClass(descriptor, NULL, *pair.first, *pair.second);
}
- } else if (ClassLoader::UseCompileTimeClassPath()) {
+ } else if (Runtime::Current()->UseCompileTimeClassPath()) {
// first try the boot class path
Class* system_class = FindSystemClass(descriptor);
if (system_class != NULL) {
@@ -1158,7 +1163,7 @@
// next try the compile time class path
const std::vector<const DexFile*>& class_path
- = ClassLoader::GetCompileTimeClassPath(class_loader);
+ = Runtime::Current()->GetCompileTimeClassPath(class_loader);
DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, class_path);
if (pair.second != NULL) {
return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
@@ -1320,7 +1325,7 @@
const OatFile::OatClass* ClassLinker::GetOatClass(const DexFile& dex_file, const char* descriptor) {
DCHECK(descriptor != NULL);
- if (!Runtime::Current()->IsStarted() || ClassLoader::UseCompileTimeClassPath()) {
+ if (!Runtime::Current()->IsStarted() || Runtime::Current()->UseCompileTimeClassPath()) {
return NULL;
}
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
@@ -2032,7 +2037,7 @@
if (!Runtime::Current()->IsStarted()) {
return false;
}
- if (ClassLoader::UseCompileTimeClassPath()) {
+ if (Runtime::Current()->UseCompileTimeClassPath()) {
return false;
}
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc
index 8c97cf1..a6a58d6 100644
--- a/src/class_linker_test.cc
+++ b/src/class_linker_test.cc
@@ -964,7 +964,7 @@
// case 2, get the initialized storage from StaticsFromCode.getS0
SirtRef<ClassLoader> class_loader(LoadDex("StaticsFromCode"));
- const DexFile* dex_file = ClassLoader::GetCompileTimeClassPath(class_loader.get())[0];
+ const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader.get())[0];
CHECK(dex_file != NULL);
Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader.get());
diff --git a/src/class_loader.cc b/src/class_loader.cc
index 6b6297b..3adb4ec 100644
--- a/src/class_loader.cc
+++ b/src/class_loader.cc
@@ -21,27 +21,6 @@
namespace art {
-bool ClassLoader::use_compile_time_class_path = false;
-ClassLoader::Table ClassLoader::compile_time_class_paths_;
-
-const std::vector<const DexFile*>& ClassLoader::GetCompileTimeClassPath(const ClassLoader* class_loader) {
- Runtime* runtime = Runtime::Current();
- if (class_loader == NULL) {
- return runtime->GetClassLinker()->GetBootClassPath();
- }
- CHECK(ClassLoader::UseCompileTimeClassPath());
- Table::const_iterator it = compile_time_class_paths_.find(class_loader);
- CHECK(it != compile_time_class_paths_.end());
- return it->second;
-}
-
-void ClassLoader::SetCompileTimeClassPath(const ClassLoader* class_loader,
- std::vector<const DexFile*>& class_path) {
- CHECK(!Runtime::Current()->IsStarted());
- use_compile_time_class_path = true;
- compile_time_class_paths_[class_loader] = class_path;
-}
-
// TODO: get global references for these
Class* PathClassLoader::dalvik_system_PathClassLoader_ = NULL;
@@ -49,7 +28,7 @@
CHECK(!Runtime::Current()->IsStarted());
DCHECK(dalvik_system_PathClassLoader_ != NULL);
SirtRef<PathClassLoader> p(down_cast<PathClassLoader*>(dalvik_system_PathClassLoader_->AllocObject()));
- SetCompileTimeClassPath(p.get(), dex_files);
+ Runtime::Current()->SetCompileTimeClassPath(p.get(), dex_files);
return p.get();
}
diff --git a/src/class_loader.h b/src/class_loader.h
index 018cea9..8d1c696 100644
--- a/src/class_loader.h
+++ b/src/class_loader.h
@@ -17,7 +17,6 @@
#ifndef ART_SRC_CLASS_LOADER_H_
#define ART_SRC_CLASS_LOADER_H_
-#include <map>
#include <vector>
#include "dex_file.h"
@@ -27,31 +26,18 @@
// C++ mirror of java.lang.ClassLoader
class MANAGED ClassLoader : public Object {
- public:
- static const std::vector<const DexFile*>& GetCompileTimeClassPath(const ClassLoader* class_loader);
- static void SetCompileTimeClassPath(const ClassLoader* class_loader, std::vector<const DexFile*>& class_path);
- static bool UseCompileTimeClassPath() {
- return use_compile_time_class_path;
- }
-
private:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
Object* packages_;
ClassLoader* parent_;
Object* proxyCache_;
- typedef std::map<const ClassLoader*, std::vector<const DexFile*> > Table;
- static Table compile_time_class_paths_;
-
- static bool use_compile_time_class_path;
-
friend struct ClassLoaderOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader);
};
// C++ mirror of dalvik.system.BaseDexClassLoader
-// TODO: add MANAGED when class_path_ removed
-class BaseDexClassLoader : public ClassLoader {
+class MANAGED BaseDexClassLoader : public ClassLoader {
private:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
String* original_path_;
@@ -62,8 +48,7 @@
};
// C++ mirror of dalvik.system.PathClassLoader
-// TODO: add MANAGED when class_path_ removed
-class PathClassLoader : public BaseDexClassLoader {
+class MANAGED PathClassLoader : public BaseDexClassLoader {
public:
static PathClassLoader* AllocCompileTime(std::vector<const DexFile*>& dex_files);
static void SetClass(Class* dalvik_system_PathClassLoader);
@@ -76,4 +61,4 @@
} // namespace art
-#endif // ART_SRC_OBJECT_H_
+#endif // ART_SRC_CLASS_LOADER_H_
diff --git a/src/common_test.h b/src/common_test.h
index dfee9b9..7da5c77 100644
--- a/src/common_test.h
+++ b/src/common_test.h
@@ -355,7 +355,7 @@
compiler_->GetCompilerLLVM()->SetElfFileName("gtest");
#endif
- Heap::VerifyHeap(); // Check for heap corruption before the test
+ Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
}
virtual void TearDown() {
@@ -398,7 +398,7 @@
compiler_.reset();
STLDeleteElements(&opened_dex_files_);
- Heap::VerifyHeap(); // Check for heap corruption after the test
+ Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test
}
std::string GetLibCoreDexFileName() {
diff --git a/src/compiler.cc b/src/compiler.cc
index 332ea25..9539972 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -18,6 +18,7 @@
#include <vector>
+#include <dlfcn.h>
#include <sys/mman.h>
#include <unistd.h>
@@ -31,6 +32,7 @@
#include "oat_file.h"
#include "object_utils.h"
#include "runtime.h"
+#include "space.h"
#include "stl_util.h"
#include "timing_logger.h"
@@ -44,13 +46,6 @@
namespace art {
-#if !defined(ART_USE_LLVM_COMPILER)
-CompiledMethod* oatCompileMethod(Compiler& compiler, const DexFile::CodeItem* code_item,
- uint32_t access_flags, uint32_t method_idx,
- const ClassLoader* class_loader,
- const DexFile& dex_file, InstructionSet);
-#endif
-
namespace arm {
ByteArray* CreateAbstractMethodErrorStub();
CompiledInvokeStub* ArmCreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len);
@@ -213,6 +208,28 @@
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);;
};
+static std::string MakeCompilerSoName(InstructionSet instruction_set) {
+ // TODO: is the ARM/Thumb2 instruction set distinction really buying us anything,
+ // or just causing hassle like this?
+ if (instruction_set == kThumb2) {
+ instruction_set = kArm;
+ }
+
+ std::ostringstream instruction_set_name_os;
+ instruction_set_name_os << instruction_set;
+ std::string instruction_set_name(instruction_set_name_os.str());
+ for (size_t i = 0; i < instruction_set_name.size(); ++i) {
+ instruction_set_name[i] = toupper(instruction_set_name[i]);
+ }
+#ifndef NDEBUG
+ const char* suffix = "d";
+#else
+ const char* suffix = "";
+#endif
+ std::string name(StringPrintf("art%s-compiler-%s", suffix, instruction_set_name.c_str()));
+ return StringPrintf(OS_SHARED_LIB_FORMAT_STR, name.c_str());
+}
+
Compiler::Compiler(InstructionSet instruction_set, bool image, size_t thread_count,
bool support_debugging, const std::set<std::string>* image_classes)
: instruction_set_(instruction_set),
@@ -226,12 +243,28 @@
thread_count_(thread_count),
support_debugging_(support_debugging),
stats_(new AOTCompilationStats),
- image_classes_(image_classes)
-#if defined(ART_USE_LLVM_COMPILER)
- ,
+ image_classes_(image_classes),
+#if !defined(ART_USE_LLVM_COMPILER)
+ compiler_library_(NULL),
+ compiler_(NULL)
+#else
compiler_llvm_(new compiler_llvm::CompilerLLVM(this, instruction_set))
#endif
- {
+{
+ std::string compiler_so_name(MakeCompilerSoName(instruction_set));
+ compiler_library_ = dlopen(compiler_so_name.c_str(), RTLD_LAZY);
+ if (compiler_library_ == NULL) {
+ LOG(FATAL) << "Couldn't find compiler library " << compiler_so_name << ": " << dlerror();
+ }
+ VLOG(compiler) << "dlopen(\"" << compiler_so_name << "\", RTLD_LAZY) returned " << compiler_library_;
+
+ compiler_ = reinterpret_cast<CompilerFn>(dlsym(compiler_library_, "oatCompileMethod"));
+ if (compiler_ == NULL) {
+ LOG(FATAL) << "Couldn't find \"oatCompileMethod\" in compiler library " << compiler_so_name << ": " << dlerror();
+ }
+
+ VLOG(compiler) << "dlsym(compiler_library, \"oatCompileMethod\") returned " << reinterpret_cast<void*>(compiler_);
+
CHECK(!Runtime::Current()->IsStarted());
if (!image_) {
CHECK(image_classes_ == NULL);
@@ -251,6 +284,10 @@
MutexLock mu(compiled_invoke_stubs_lock_);
STLDeleteValues(&compiled_invoke_stubs_);
}
+ if (compiler_library_ != NULL) {
+ VLOG(compiler) << "dlclose(" << compiler_library_ << ")";
+ dlclose(compiler_library_);
+ }
}
ByteArray* Compiler::CreateResolutionStub(InstructionSet instruction_set,
@@ -1064,8 +1101,8 @@
#if defined(ART_USE_LLVM_COMPILER)
compiled_method = compiler_llvm_->CompileDexMethod(&oat_compilation_unit);
#else
- compiled_method = oatCompileMethod(*this, code_item, access_flags, method_idx, class_loader,
- dex_file, kThumb2);
+ compiled_method = (*compiler_)(*this, code_item, access_flags, method_idx, class_loader,
+ dex_file);
#endif
CHECK(compiled_method != NULL) << PrettyMethod(method_idx, dex_file);
}
diff --git a/src/compiler.h b/src/compiler.h
index 639ab4f..31ee613 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -203,6 +203,14 @@
#if defined(ART_USE_LLVM_COMPILER)
UniquePtr<compiler_llvm::CompilerLLVM> compiler_llvm_;
+#else
+ void* compiler_library_;
+ typedef CompiledMethod* (*CompilerFn)(Compiler& compiler,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, uint32_t method_idx,
+ const ClassLoader* class_loader,
+ const DexFile& dex_file);
+ CompilerFn compiler_;
#endif
DISALLOW_COPY_AND_ASSIGN(Compiler);
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index 1e0439b..8636e11 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -40,15 +40,7 @@
*/
#define MAX_ASSEMBLER_RETRIES 50
-typedef enum OatInstructionSetType {
- DALVIK_OAT_NONE = 0,
- DALVIK_OAT_ARM,
- DALVIK_OAT_THUMB2,
- DALVIK_OAT_X86,
- DALVIK_OAT_MIPS32
-} OatInstructionSetType;
-
-/* Supress optimization if corresponding bit set */
+/* Suppress optimization if corresponding bit set */
enum optControlVector {
kLoadStoreElimination = 0,
kLoadHoisting,
@@ -183,9 +175,6 @@
bool oatArchInit(void);
bool oatStartup(void);
void oatShutdown(void);
-CompiledMethod* oatCompileMethod(Compiler& compiler, bool is_direct,
- uint32_t method_idx, const ClassLoader* class_loader,
- const DexFile& dex_file, OatInstructionSetType);
void oatScanAllClassPointers(void (*callback)(void* ptr));
void oatInitializeSSAConversion(struct CompilationUnit* cUnit);
int oatConvertSSARegToDalvik(const struct CompilationUnit* cUnit, int ssaReg);
@@ -220,4 +209,10 @@
} // namespace art
+extern "C" art::CompiledMethod* oatCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::ClassLoader* class_loader,
+ const art::DexFile& dex_file);
+
#endif // ART_SRC_COMPILER_COMPILER_H_
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 6bda522..2515713 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -307,7 +307,7 @@
bool methodTraceSupport; // For TraceView profiling
struct RegisterPool* regPool;
int optRound; // round number to tell an LIR's age
- OatInstructionSetType instructionSet;
+ InstructionSet instructionSet;
/* Number of total regs used in the whole cUnit after SSA transformation */
int numSSARegs;
/* Map SSA reg i to the Dalvik[15..0]/Sub[31..16] pair. */
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index 7988829..4271722 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -724,15 +724,20 @@
}
}
-/*
- * Compile a method.
- */
-CompiledMethod* oatCompileMethod(Compiler& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, uint32_t method_idx,
- const ClassLoader* class_loader,
- const DexFile& dex_file,
- InstructionSet insnSet)
+void oatInit(CompilationUnit* cUnit, const Compiler& compiler) {
+ if (!oatArchInit()) {
+ LOG(FATAL) << "Failed to initialize oat";
+ }
+ if (!oatHeapInit(cUnit)) {
+ LOG(FATAL) << "Failed to initialize oat heap";
+ }
+}
+
+CompiledMethod* oatCompileMethodInternal(Compiler& compiler,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, uint32_t method_idx,
+ const ClassLoader* class_loader,
+ const DexFile& dex_file)
{
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
@@ -746,6 +751,7 @@
memset(cUnit.get(), 0, sizeof(*cUnit));
oatInit(cUnit.get(), compiler);
+
cUnit->compiler = &compiler;
cUnit->class_linker = class_linker;
cUnit->dex_file = &dex_file;
@@ -754,7 +760,7 @@
cUnit->code_item = code_item;
cUnit->access_flags = access_flags;
cUnit->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- cUnit->instructionSet = (OatInstructionSetType)insnSet;
+ cUnit->instructionSet = compiler.GetInstructionSet();
cUnit->insns = code_item->insns_;
cUnit->insnsSize = code_item->insns_size_in_code_units_;
cUnit->numIns = code_item->ins_size_;
@@ -943,7 +949,7 @@
(1 << kPromoteRegs);
if (cUnit->printMe) {
LOG(INFO) << "Compiler: " << PrettyMethod(method_idx, dex_file)
- << " too big: " << cUnit->numBlocks;
+ << " too big: " << cUnit->numBlocks;
}
}
}
@@ -1027,14 +1033,14 @@
return result;
}
-void oatInit(CompilationUnit* cUnit, const Compiler& compiler)
-{
- if (!oatArchInit()) {
- LOG(FATAL) << "Failed to initialize oat";
- }
- if (!oatHeapInit(cUnit)) {
- LOG(FATAL) << "Failed to initialize oat heap";
- }
-}
-
} // namespace art
+
+extern "C" art::CompiledMethod* oatCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::ClassLoader* class_loader,
+ const art::DexFile& dex_file)
+{
+ CHECK_EQ(compiler.GetInstructionSet(), art::oatInstructionSet());
+ return art::oatCompileMethodInternal(compiler, code_item, access_flags, method_idx, class_loader, dex_file);
+}
diff --git a/src/compiler/Ralloc.cc b/src/compiler/Ralloc.cc
index 3353437..abc573e 100644
--- a/src/compiler/Ralloc.cc
+++ b/src/compiler/Ralloc.cc
@@ -406,9 +406,10 @@
}
/* Figure out the frame size */
- cUnit->numPadding = (STACK_ALIGN_WORDS -
+ static const int kStackAlignWords = kStackAlignment/sizeof(uint32_t);
+ cUnit->numPadding = (kStackAlignWords -
(cUnit->numCoreSpills + cUnit->numFPSpills + cUnit->numRegs +
- cUnit->numOuts + 2)) & (STACK_ALIGN_WORDS-1);
+ cUnit->numOuts + 2)) & (kStackAlignWords - 1);
cUnit->frameSize = (cUnit->numCoreSpills + cUnit->numFPSpills +
cUnit->numRegs + cUnit->numOuts +
cUnit->numPadding + 2) * 4;
diff --git a/src/compiler/codegen/CodegenFactory.cc b/src/compiler/codegen/CodegenFactory.cc
index b27efe0..8a6e1bc 100644
--- a/src/compiler/codegen/CodegenFactory.cc
+++ b/src/compiler/codegen/CodegenFactory.cc
@@ -266,7 +266,7 @@
}
/*
- * Utiltiy to load the current Method*. Broken out
+ * Utility to load the current Method*. Broken out
* to allow easy change between placing the current Method* in a
* dedicated register or its home location in the frame.
*/
diff --git a/src/compiler/codegen/CompilerCodegen.h b/src/compiler/codegen/CompilerCodegen.h
index 1b54be9..9537e46 100644
--- a/src/compiler/codegen/CompilerCodegen.h
+++ b/src/compiler/codegen/CompilerCodegen.h
@@ -51,7 +51,7 @@
void oatInitializeRegAlloc(CompilationUnit* cUnit);
/* Implemented in codegen/<target>/<target_variant>/ArchVariant.c */
-OatInstructionSetType oatInstructionSet(void);
+InstructionSet oatInstructionSet();
/*
* Implemented in codegen/<target>/<target_variant>/ArchVariant.c
diff --git a/src/compiler/codegen/Optimizer.h b/src/compiler/codegen/Optimizer.h
index 74a5b75..06c7732 100644
--- a/src/compiler/codegen/Optimizer.h
+++ b/src/compiler/codegen/Optimizer.h
@@ -21,9 +21,6 @@
namespace art {
-#define STACK_ALIGN_WORDS 4
-#define STACK_ALIGNMENT (STACK_ALIGN_WORDS * 4)
-
/* Forward declarations */
struct CompilationUnit;
struct LIR;
diff --git a/src/compiler/codegen/RallocUtil.cc b/src/compiler/codegen/RallocUtil.cc
index 232b304..cfda721 100644
--- a/src/compiler/codegen/RallocUtil.cc
+++ b/src/compiler/codegen/RallocUtil.cc
@@ -1227,23 +1227,4 @@
return oatVRegOffset(cUnit, oatS2VReg(cUnit, sReg));
}
-
-/* Return sp-relative offset in bytes using Method* */
-extern int oatVRegOffset(const DexFile::CodeItem* code_item,
- uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg)
-{
- int numIns = code_item->ins_size_;
- int numRegs = code_item->registers_size_ - numIns;
- int numOuts = code_item->outs_size_;
- int numSpills = __builtin_popcount(core_spills) +
- __builtin_popcount(fp_spills);
- int numPadding = (STACK_ALIGN_WORDS -
- (numSpills + numRegs + numOuts + 2)) & (STACK_ALIGN_WORDS-1);
- int regsOffset = (numOuts + numPadding + 1) * 4;
- int insOffset = frame_size + 4;
- return (reg < numRegs) ? regsOffset + (reg << 2) :
- insOffset + ((reg - numRegs) << 2);
-}
-
} // namespace art
diff --git a/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc b/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
index 77844fc..dcf3a99 100644
--- a/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
+++ b/src/compiler/codegen/arm/armv7-a-neon/ArchVariant.cc
@@ -20,9 +20,9 @@
* Determine the initial instruction set to be used for this trace.
* Later components may decide to change this.
*/
-OatInstructionSetType oatInstructionSet(void)
+InstructionSet oatInstructionSet()
{
- return DALVIK_OAT_THUMB2;
+ return kThumb2;
}
/* Architecture-specific initializations and checks go here */
diff --git a/src/compiler/codegen/arm/armv7-a/ArchVariant.cc b/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
index 77844fc..dcf3a99 100644
--- a/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
+++ b/src/compiler/codegen/arm/armv7-a/ArchVariant.cc
@@ -20,9 +20,9 @@
* Determine the initial instruction set to be used for this trace.
* Later components may decide to change this.
*/
-OatInstructionSetType oatInstructionSet(void)
+InstructionSet oatInstructionSet()
{
- return DALVIK_OAT_THUMB2;
+ return kThumb2;
}
/* Architecture-specific initializations and checks go here */
diff --git a/src/compiler/codegen/mips/mips/ArchVariant.cc b/src/compiler/codegen/mips/mips/ArchVariant.cc
index 6d29fc5..6b04d70 100644
--- a/src/compiler/codegen/mips/mips/ArchVariant.cc
+++ b/src/compiler/codegen/mips/mips/ArchVariant.cc
@@ -25,9 +25,9 @@
* Determine the initial instruction set to be used for this trace.
* Later components may decide to change this.
*/
-OatInstructionSetType oatInstructionSet(void)
+InstructionSet oatInstructionSet()
{
- return DALVIK_OAT_MIPS32;
+ return kMips;
}
/* Architecture-specific initializations and checks go here */
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
index 93b17e1..944311c 100644
--- a/src/compiler/codegen/x86/x86/ArchVariant.cc
+++ b/src/compiler/codegen/x86/x86/ArchVariant.cc
@@ -25,9 +25,9 @@
* Determine the initial instruction set to be used for this trace.
* Later components may decide to change this.
*/
-OatInstructionSetType oatInstructionSet(void)
+InstructionSet oatInstructionSet()
{
- return DALVIK_OAT_X86;
+ return kX86;
}
/* Architecture-specific initializations and checks go here */
diff --git a/src/compiler_test.cc b/src/compiler_test.cc
index 417d352..272f838 100644
--- a/src/compiler_test.cc
+++ b/src/compiler_test.cc
@@ -33,7 +33,7 @@
protected:
void CompileAll(const ClassLoader* class_loader) {
- compiler_->CompileAll(class_loader, ClassLoader::GetCompileTimeClassPath(class_loader));
+ compiler_->CompileAll(class_loader, Runtime::Current()->GetCompileTimeClassPath(class_loader));
MakeAllExecutable(class_loader);
}
@@ -54,7 +54,7 @@
void MakeAllExecutable(const ClassLoader* class_loader) {
const std::vector<const DexFile*>& class_path
- = ClassLoader::GetCompileTimeClassPath(class_loader);
+ = Runtime::Current()->GetCompileTimeClassPath(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
CHECK(dex_file != NULL);
@@ -126,6 +126,7 @@
}
TEST_F(CompilerTest, AbstractMethodErrorStub) {
+#if defined(__arm__)
CompileVirtualMethod(NULL, "java.lang.Class", "isFinalizable", "()Z");
CompileDirectMethod(NULL, "java.lang.Object", "<init>", "()V");
@@ -139,7 +140,6 @@
jobject jobj_ = env_->NewObject(c_class, constructor);
ASSERT_TRUE(jobj_ != NULL);
-#if defined(__arm__)
Class* jlame = class_linker_->FindClass("Ljava/lang/AbstractMethodError;", class_loader.get());
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
diff --git a/src/dalvik_system_VMDebug.cc b/src/dalvik_system_VMDebug.cc
index 5f4b6d9..62a1f43 100644
--- a/src/dalvik_system_VMDebug.cc
+++ b/src/dalvik_system_VMDebug.cc
@@ -240,7 +240,7 @@
if (c == NULL) {
return 0;
}
- return Heap::CountInstances(c, countAssignable);
+ return Runtime::Current()->GetHeap()->CountInstances(c, countAssignable);
}
JNINativeMethod gMethods[] = {
diff --git a/src/dalvik_system_VMRuntime.cc b/src/dalvik_system_VMRuntime.cc
index c8a3a4c..cca5d2b 100644
--- a/src/dalvik_system_VMRuntime.cc
+++ b/src/dalvik_system_VMRuntime.cc
@@ -19,6 +19,7 @@
#include "jni_internal.h"
#include "object.h"
#include "object_utils.h"
+#include "scoped_heap_lock.h"
#include "space.h"
#include "thread.h"
@@ -32,11 +33,11 @@
namespace {
jfloat VMRuntime_getTargetHeapUtilization(JNIEnv*, jobject) {
- return Heap::GetTargetHeapUtilization();
+ return Runtime::Current()->GetHeap()->GetTargetHeapUtilization();
}
void VMRuntime_nativeSetTargetHeapUtilization(JNIEnv*, jobject, jfloat target) {
- Heap::SetTargetHeapUtilization(target);
+ Runtime::Current()->GetHeap()->SetTargetHeapUtilization(target);
}
void VMRuntime_startJitCompilation(JNIEnv*, jobject) {
@@ -91,7 +92,7 @@
}
void VMRuntime_clearGrowthLimit(JNIEnv*, jobject) {
- Heap::ClearGrowthLimit();
+ Runtime::Current()->GetHeap()->ClearGrowthLimit();
}
jboolean VMRuntime_isDebuggerActive(JNIEnv*, jobject) {
@@ -132,7 +133,7 @@
// running with CheckJNI forces you to obey the strictest rules.
if (!env_ext->check_jni) {
LOG(INFO) << "Turning on JNI app bug workarounds for target SDK version "
- << targetSdkVersion << "...";
+ << targetSdkVersion << "...";
env_ext->vm->work_around_app_jni_bugs = true;
}
}
@@ -140,10 +141,11 @@
void VMRuntime_trimHeap(JNIEnv* env, jobject) {
ScopedHeapLock heap_lock;
- size_t alloc_space_size = Heap::GetAllocSpace()->Size();
- float utilization = static_cast<float>(Heap::GetBytesAllocated()) / alloc_space_size;
+ Heap* heap = Runtime::Current()->GetHeap();
+ size_t alloc_space_size = heap->GetAllocSpace()->Size();
+ float utilization = static_cast<float>(heap->GetBytesAllocated()) / alloc_space_size;
uint64_t start_ns = NanoTime();
- Heap::GetAllocSpace()->Trim();
+ heap->GetAllocSpace()->Trim();
LOG(INFO) << "Parallel heap trimming took " << PrettyDuration(NanoTime() - start_ns)
<< " on a " << PrettySize(alloc_space_size)
<< " heap with " << static_cast<int>(100 * utilization) << "% utilization";
diff --git a/src/dalvik_system_VMStack.cc b/src/dalvik_system_VMStack.cc
index 7425847..37ee8b2 100644
--- a/src/dalvik_system_VMStack.cc
+++ b/src/dalvik_system_VMStack.cc
@@ -17,6 +17,7 @@
#include "jni_internal.h"
#include "class_loader.h"
#include "object.h"
+#include "scoped_heap_lock.h"
#include "thread_list.h"
#include "JniConstants.h" // Last to avoid problems with LOG redefinition.
diff --git a/src/debugger.cc b/src/debugger.cc
index 95e8849..1480ee7 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -1631,7 +1631,7 @@
CHECK_EQ(width, sizeof(JDWP::ObjectId));
Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
VLOG(jdwp) << "get array local " << reg << " = " << o;
- if (o != NULL && !Heap::IsHeapAddress(o)) {
+ if (o != NULL && !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
}
JDWP::SetObjectId(buf+1, gRegistry->Add(o));
@@ -1647,7 +1647,7 @@
CHECK_EQ(width, sizeof(JDWP::ObjectId));
Object* o = reinterpret_cast<Object*>(f.GetVReg(m, reg));
VLOG(jdwp) << "get object local " << reg << " = " << o;
- if (o != NULL && !Heap::IsHeapAddress(o)) {
+ if (o != NULL && !Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
}
tag = TagFromObject(o);
@@ -2572,15 +2572,16 @@
* [u4]: current number of objects allocated
*/
uint8_t heap_count = 1;
+ Heap* heap = Runtime::Current()->GetHeap();
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, heap_count);
JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
JDWP::Append8BE(bytes, MilliTime());
JDWP::Append1BE(bytes, reason);
- JDWP::Append4BE(bytes, Heap::GetMaxMemory()); // Max allowed heap size in bytes.
- JDWP::Append4BE(bytes, Heap::GetTotalMemory()); // Current heap size in bytes.
- JDWP::Append4BE(bytes, Heap::GetBytesAllocated());
- JDWP::Append4BE(bytes, Heap::GetObjectsAllocated());
+ JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes.
+ JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes.
+ JDWP::Append4BE(bytes, heap->GetBytesAllocated());
+ JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
}
@@ -2736,7 +2737,7 @@
// If we're looking at the native heap, we'll just return
// (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
- if (is_native_heap || !Heap::IsLiveObjectLocked(o)) {
+ if (is_native_heap || !Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
@@ -2746,7 +2747,7 @@
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
- if (!Heap::IsHeapAddress(c)) {
+ if (!Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
LOG(WARNING) << "Invalid class for managed heap object: " << o << " " << c;
return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
}
@@ -2810,7 +2811,8 @@
// dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
UNIMPLEMENTED(WARNING) << "Native heap send heap segments";
} else {
- Heap::GetAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ Heap* heap = Runtime::Current()->GetHeap();
+ heap->GetAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
}
// Finally, send a heap end chunk.
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index d5d036f..b8d782b 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -240,9 +240,10 @@
const std::string& oat_filename,
const std::string& oat_location) {
// If we have an existing boot image, position new space after its oat file
- if (Heap::GetSpaces().size() > 1) {
+ Heap* heap = Runtime::Current()->GetHeap();
+ if (heap->GetSpaces().size() > 1) {
ImageSpace* last_image_space = NULL;
- const std::vector<Space*>& spaces = Heap::GetSpaces();
+ const std::vector<Space*>& spaces = heap->GetSpaces();
for (size_t i=0; i < spaces.size(); i++) {
if (spaces[i]->IsImageSpace()) {
last_image_space = spaces[i]->AsImageSpace();
@@ -250,7 +251,7 @@
}
CHECK(last_image_space != NULL);
CHECK(last_image_space->IsImageSpace());
- CHECK(!Heap::GetSpaces()[Heap::GetSpaces().size()-1]->IsImageSpace());
+ CHECK(!heap->GetSpaces()[heap->GetSpaces().size()-1]->IsImageSpace());
byte* oat_limit_addr = last_image_space->GetImageHeader().GetOatEnd();
image_base = RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr), kPageSize);
}
diff --git a/src/dex_verifier.cc b/src/dex_verifier.cc
index 5b8102a..91dd4ea 100644
--- a/src/dex_verifier.cc
+++ b/src/dex_verifier.cc
@@ -4030,47 +4030,61 @@
return NULL;
}
-Mutex DexVerifier::gc_maps_lock_("verifier gc maps lock");
-DexVerifier::GcMapTable DexVerifier::gc_maps_;
+Mutex* DexVerifier::gc_maps_lock_ = NULL;
+DexVerifier::GcMapTable* DexVerifier::gc_maps_ = NULL;
+
+void DexVerifier::InitGcMaps() {
+ gc_maps_lock_ = new Mutex("verifier GC maps lock");
+ MutexLock mu(*gc_maps_lock_);
+ gc_maps_ = new DexVerifier::GcMapTable;
+}
+
+void DexVerifier::DeleteGcMaps() {
+ MutexLock mu(*gc_maps_lock_);
+ STLDeleteValues(gc_maps_);
+}
void DexVerifier::SetGcMap(Compiler::MethodReference ref, const std::vector<uint8_t>& gc_map) {
- MutexLock mu(gc_maps_lock_);
+ MutexLock mu(*gc_maps_lock_);
const std::vector<uint8_t>* existing_gc_map = GetGcMap(ref);
if (existing_gc_map != NULL) {
CHECK(*existing_gc_map == gc_map);
delete existing_gc_map;
}
- gc_maps_[ref] = &gc_map;
+ (*gc_maps_)[ref] = &gc_map;
CHECK(GetGcMap(ref) != NULL);
}
const std::vector<uint8_t>* DexVerifier::GetGcMap(Compiler::MethodReference ref) {
- MutexLock mu(gc_maps_lock_);
- GcMapTable::const_iterator it = gc_maps_.find(ref);
- if (it == gc_maps_.end()) {
+ MutexLock mu(*gc_maps_lock_);
+ GcMapTable::const_iterator it = gc_maps_->find(ref);
+ if (it == gc_maps_->end()) {
return NULL;
}
CHECK(it->second != NULL);
return it->second;
}
-void DexVerifier::DeleteGcMaps() {
- MutexLock mu(gc_maps_lock_);
- STLDeleteValues(&gc_maps_);
+static Mutex& GetRejectedClassesLock() {
+ static Mutex rejected_classes_lock("verifier rejected classes lock");
+ return rejected_classes_lock;
}
-Mutex DexVerifier::rejected_classes_lock_("verifier rejected classes lock");
-std::set<Compiler::ClassReference> DexVerifier::rejected_classes_;
+static std::set<Compiler::ClassReference>& GetRejectedClasses() {
+ static std::set<Compiler::ClassReference> rejected_classes;
+ return rejected_classes;
+}
void DexVerifier::AddRejectedClass(Compiler::ClassReference ref) {
- MutexLock mu(rejected_classes_lock_);
- rejected_classes_.insert(ref);
+ MutexLock mu(GetRejectedClassesLock());
+ GetRejectedClasses().insert(ref);
CHECK(IsClassRejected(ref));
}
bool DexVerifier::IsClassRejected(Compiler::ClassReference ref) {
- MutexLock mu(rejected_classes_lock_);
- return (rejected_classes_.find(ref) != rejected_classes_.end());
+ MutexLock mu(GetRejectedClassesLock());
+ std::set<Compiler::ClassReference>& rejected_classes(GetRejectedClasses());
+ return (rejected_classes.find(ref) != rejected_classes.end());
}
#if defined(ART_USE_LLVM_COMPILER)
diff --git a/src/dex_verifier.h b/src/dex_verifier.h
index c2785c6..17826e7 100644
--- a/src/dex_verifier.h
+++ b/src/dex_verifier.h
@@ -910,6 +910,7 @@
void Dump(std::ostream& os);
static const std::vector<uint8_t>* GetGcMap(Compiler::MethodReference ref);
+ static void InitGcMaps();
static void DeleteGcMaps();
static bool IsClassRejected(Compiler::ClassReference ref);
@@ -1289,15 +1290,12 @@
return insn_flags_[work_insn_idx_];
}
- typedef std::map<const Compiler::MethodReference, const std::vector<uint8_t>*> GcMapTable;
// All the GC maps that the verifier has created
- static Mutex gc_maps_lock_;
- static GcMapTable gc_maps_;
+ typedef std::map<const Compiler::MethodReference, const std::vector<uint8_t>*> GcMapTable;
+ static Mutex* gc_maps_lock_;
+ static GcMapTable* gc_maps_;
static void SetGcMap(Compiler::MethodReference ref, const std::vector<uint8_t>& gc_map);
- // Set of rejected classes that skip compilation
- static Mutex rejected_classes_lock_;
- static std::set<Compiler::ClassReference> rejected_classes_;
static void AddRejectedClass(Compiler::ClassReference ref);
RegTypeCache reg_types_;
diff --git a/src/heap.cc b/src/heap.cc
index e153214..b9e41c0 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -29,6 +29,7 @@
#include "object.h"
#include "object_utils.h"
#include "os.h"
+#include "scoped_heap_lock.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
@@ -37,39 +38,6 @@
namespace art {
-std::vector<Space*> Heap::spaces_;
-
-AllocSpace* Heap::alloc_space_ = NULL;
-
-size_t Heap::num_bytes_allocated_ = 0;
-
-size_t Heap::num_objects_allocated_ = 0;
-
-bool Heap::is_gc_running_ = false;
-
-HeapBitmap* Heap::mark_bitmap_ = NULL;
-
-HeapBitmap* Heap::live_bitmap_ = NULL;
-
-CardTable* Heap::card_table_ = NULL;
-
-bool Heap::card_marking_disabled_ = false;
-
-Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
-Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;
-
-MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
-MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
-MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
-MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
-MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);
-
-float Heap::target_utilization_ = 0.5;
-
-Mutex* Heap::lock_ = NULL;
-
-bool Heap::verify_objects_ = false;
-
static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
if (*first_space == NULL) {
*first_space = space;
@@ -161,10 +129,29 @@
return true;
}
-void Heap::Init(size_t initial_size, size_t growth_limit, size_t capacity,
- const std::string& original_image_file_name) {
+Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
+ const std::string& original_image_file_name)
+ : lock_(NULL),
+ alloc_space_(NULL),
+ mark_bitmap_(NULL),
+ live_bitmap_(NULL),
+ card_table_(NULL),
+ card_marking_disabled_(false),
+ is_gc_running_(false),
+ num_bytes_allocated_(0),
+ num_objects_allocated_(0),
+ java_lang_ref_FinalizerReference_(NULL),
+ java_lang_ref_ReferenceQueue_(NULL),
+ reference_referent_offset_(0),
+ reference_queue_offset_(0),
+ reference_queueNext_offset_(0),
+ reference_pendingNext_offset_(0),
+ finalizer_reference_zombie_offset_(0),
+ target_utilization_(0.5),
+ verify_objects_(false)
+{
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "Heap::Init entering";
+ LOG(INFO) << "Heap() entering";
}
// Compute the bounds of all spaces for allocating live and mark bitmaps
@@ -228,7 +215,7 @@
}
// Mark image objects in the live bitmap
- for (size_t i = 0; i < spaces_.size(); i++) {
+ for (size_t i = 0; i < spaces_.size(); ++i) {
Space* space = spaces_[i];
if (space->IsImageSpace()) {
space->AsImageSpace()->RecordImageAllocations(live_bitmap.get());
@@ -254,7 +241,7 @@
num_bytes_allocated_ = 0;
num_objects_allocated_ = 0;
- // It's still to early to take a lock because there are no threads yet,
+ // It's still too early to take a lock because there are no threads yet,
// but we can create the heap lock now. We don't create it earlier to
// make it clear that you can't use locks during heap initialization.
lock_ = new Mutex("Heap lock", kHeapLock);
@@ -262,11 +249,16 @@
Heap::EnableObjectValidation();
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- LOG(INFO) << "Heap::Init exiting";
+ LOG(INFO) << "Heap() exiting";
}
}
-void Heap::Destroy() {
+void Heap::AddSpace(Space* space) {
+ spaces_.push_back(space);
+}
+
+Heap::~Heap() {
+ VLOG(heap) << "~Heap()";
// We can't take the heap lock here because there might be a daemon thread suspended with the
// heap lock held. We know though that no non-daemon threads are executing, and we know that
// all daemon threads are suspended, and we also know that the threads list have been deleted, so
@@ -305,7 +297,7 @@
if (obj == NULL || !IsAligned<kObjectAlignment>(obj)) {
return false;
}
- for (size_t i = 0; i < spaces_.size(); i++) {
+ for (size_t i = 0; i < spaces_.size(); ++i) {
if (spaces_[i]->Contains(obj)) {
return true;
}
@@ -362,12 +354,12 @@
void Heap::VerificationCallback(Object* obj, void* arg) {
DCHECK(obj != NULL);
- Heap::VerifyObjectLocked(obj);
+ reinterpret_cast<Heap*>(arg)->VerifyObjectLocked(obj);
}
void Heap::VerifyHeap() {
ScopedHeapLock heap_lock;
- live_bitmap_->Walk(Heap::VerificationCallback, NULL);
+ live_bitmap_->Walk(Heap::VerificationCallback, this);
}
void Heap::RecordAllocationLocked(AllocSpace* space, const Object* obj) {
diff --git a/src/heap.h b/src/heap.h
index 2fd381c..ccc2d23 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -17,6 +17,7 @@
#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_
+#include <string>
#include <vector>
#include "card_table.h"
@@ -50,57 +51,58 @@
// Create a heap with the requested sizes. The possible empty
// image_file_names names specify Spaces to load based on
// ImageWriter output.
- static void Init(size_t starting_size, size_t growth_limit, size_t capacity,
- const std::string& image_file_name);
+ explicit Heap(size_t starting_size, size_t growth_limit, size_t capacity,
+ const std::string& image_file_name);
- static void Destroy();
+ ~Heap();
// Allocates and initializes storage for an object instance.
- static Object* AllocObject(Class* klass, size_t num_bytes);
+ Object* AllocObject(Class* klass, size_t num_bytes);
// Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
- static void VerifyObject(const Object *obj);
+ void VerifyObject(const Object *obj);
#else
- static void VerifyObject(const Object *obj) {}
+ void VerifyObject(const Object *obj) {}
#endif
// Check sanity of all live references. Requires the heap lock.
- static void VerifyHeap();
+ void VerifyHeap();
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- static bool IsHeapAddress(const Object* obj);
+ bool IsHeapAddress(const Object* obj);
+
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
- static bool IsLiveObjectLocked(const Object* obj);
+ bool IsLiveObjectLocked(const Object* obj);
// Initiates an explicit garbage collection.
- static void CollectGarbage(bool clear_soft_references);
+ void CollectGarbage(bool clear_soft_references);
// Implements java.lang.Runtime.maxMemory.
- static int64_t GetMaxMemory();
+ int64_t GetMaxMemory();
// Implements java.lang.Runtime.totalMemory.
- static int64_t GetTotalMemory();
+ int64_t GetTotalMemory();
// Implements java.lang.Runtime.freeMemory.
- static int64_t GetFreeMemory();
+ int64_t GetFreeMemory();
// Implements VMDebug.countInstancesOfClass.
- static int64_t CountInstances(Class* c, bool count_assignable);
+ int64_t CountInstances(Class* c, bool count_assignable);
// Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
// implement dalvik.system.VMRuntime.clearGrowthLimit.
- static void ClearGrowthLimit();
+ void ClearGrowthLimit();
// Target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.getTargetHeapUtilization.
- static float GetTargetHeapUtilization() {
+ float GetTargetHeapUtilization() {
return target_utilization_;
}
// Set target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.setTargetHeapUtilization.
- static void SetTargetHeapUtilization(float target) {
+ void SetTargetHeapUtilization(float target) {
DCHECK_GT(target, 0.0f); // asserted in Java code
DCHECK_LT(target, 1.0f);
target_utilization_ = target;
@@ -108,186 +110,182 @@
// For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
// from the system. Doesn't allow the space to exceed its growth limit.
- static void SetIdealFootprint(size_t max_allowed_footprint);
+ void SetIdealFootprint(size_t max_allowed_footprint);
// Blocks the caller until the garbage collector becomes idle.
- static void WaitForConcurrentGcToComplete();
+ void WaitForConcurrentGcToComplete();
- static pid_t GetLockOwner(); // For SignalCatcher.
- static void Lock();
- static void Unlock();
- static void AssertLockHeld() {
+ pid_t GetLockOwner(); // For SignalCatcher.
+ void Lock();
+ void Unlock();
+ void AssertLockHeld() {
lock_->AssertHeld();
}
- static void AssertLockNotHeld() {
+ void AssertLockNotHeld() {
lock_->AssertNotHeld();
}
- static const std::vector<Space*>& GetSpaces() {
+ const std::vector<Space*>& GetSpaces() {
return spaces_;
}
- static HeapBitmap* GetLiveBits() {
+ HeapBitmap* GetLiveBits() {
return live_bitmap_;
}
- static HeapBitmap* GetMarkBits() {
+ HeapBitmap* GetMarkBits() {
return mark_bitmap_;
}
- static void SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
+ void SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
-                                  Class* java_lang_ref_ReferenceQueue);
+                           Class* java_lang_ref_ReferenceQueue);
- static void SetReferenceOffsets(MemberOffset reference_referent_offset,
- MemberOffset reference_queue_offset,
- MemberOffset reference_queueNext_offset,
- MemberOffset reference_pendingNext_offset,
- MemberOffset finalizer_reference_zombie_offset);
+ void SetReferenceOffsets(MemberOffset reference_referent_offset,
+ MemberOffset reference_queue_offset,
+ MemberOffset reference_queueNext_offset,
+ MemberOffset reference_pendingNext_offset,
+ MemberOffset finalizer_reference_zombie_offset);
- static Object* GetReferenceReferent(Object* reference);
- static void ClearReferenceReferent(Object* reference);
+ Object* GetReferenceReferent(Object* reference);
+ void ClearReferenceReferent(Object* reference);
// Returns true if the reference object has not yet been enqueued.
- static bool IsEnqueuable(const Object* ref);
- static void EnqueueReference(Object* ref, Object** list);
- static void EnqueuePendingReference(Object* ref, Object** list);
- static Object* DequeuePendingReference(Object** list);
+ bool IsEnqueuable(const Object* ref);
+ void EnqueueReference(Object* ref, Object** list);
+ void EnqueuePendingReference(Object* ref, Object** list);
+ Object* DequeuePendingReference(Object** list);
- static MemberOffset GetReferencePendingNextOffset() {
+ MemberOffset GetReferencePendingNextOffset() {
DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
return reference_pendingNext_offset_;
}
- static MemberOffset GetFinalizerReferenceZombieOffset() {
+ MemberOffset GetFinalizerReferenceZombieOffset() {
DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
return finalizer_reference_zombie_offset_;
}
- static void EnableObjectValidation() {
+ void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
- Heap::VerifyHeap();
+ VerifyHeap();
#endif
verify_objects_ = true;
}
- static void DisableObjectValidation() {
+ void DisableObjectValidation() {
verify_objects_ = false;
}
// Callers must hold the heap lock.
- static void RecordFreeLocked(size_t freed_objects, size_t freed_bytes);
+ void RecordFreeLocked(size_t freed_objects, size_t freed_bytes);
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
- static void WriteBarrierField(const Object* dest, MemberOffset offset, const Object* new_val) {
+ void WriteBarrierField(const Object* dest, MemberOffset offset, const Object* new_val) {
if (!card_marking_disabled_) {
card_table_->MarkCard(dest);
}
}
// Write barrier for array operations that update many field positions
- static void WriteBarrierArray(const Object* dest, int pos, size_t len) {
+ void WriteBarrierArray(const Object* dest, int pos, size_t len) {
if (UNLIKELY(!card_marking_disabled_)) {
card_table_->MarkCard(dest);
}
}
- static CardTable* GetCardTable() {
+ CardTable* GetCardTable() {
return card_table_;
}
- static void DisableCardMarking() {
+ void DisableCardMarking() {
// TODO: we shouldn't need to disable card marking, this is here to help the image_writer
card_marking_disabled_ = true;
}
- static void AddFinalizerReference(Thread* self, Object* object);
+ void AddFinalizerReference(Thread* self, Object* object);
- static size_t GetBytesAllocated() { return num_bytes_allocated_; }
- static size_t GetObjectsAllocated() { return num_objects_allocated_; }
+ size_t GetBytesAllocated() { return num_bytes_allocated_; }
+ size_t GetObjectsAllocated() { return num_objects_allocated_; }
- static AllocSpace* GetAllocSpace() {
+ AllocSpace* GetAllocSpace() {
return alloc_space_;
}
private:
// Allocates uninitialized storage.
- static Object* AllocateLocked(size_t num_bytes);
- static Object* AllocateLocked(AllocSpace* space, size_t num_bytes);
+ Object* AllocateLocked(size_t num_bytes);
+ Object* AllocateLocked(AllocSpace* space, size_t num_bytes);
// Pushes a list of cleared references out to the managed heap.
- static void EnqueueClearedReferences(Object** cleared_references);
+ void EnqueueClearedReferences(Object** cleared_references);
- static void RequestHeapTrim();
+ void RequestHeapTrim();
- static void RecordAllocationLocked(AllocSpace* space, const Object* object);
- static void RecordImageAllocations(Space* space);
+ void RecordAllocationLocked(AllocSpace* space, const Object* object);
+ void RecordImageAllocations(Space* space);
- static void CollectGarbageInternal(bool clear_soft_references);
+ void CollectGarbageInternal(bool clear_soft_references);
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
- static void GrowForUtilization();
+ void GrowForUtilization();
- static void AddSpace(Space* space) {
- spaces_.push_back(space);
- }
+ void AddSpace(Space* space);
- static void VerifyObjectLocked(const Object *obj);
+ void VerifyObjectLocked(const Object *obj);
static void VerificationCallback(Object* obj, void* arg);
- static Mutex* lock_;
+ Mutex* lock_;
- static std::vector<Space*> spaces_;
+ std::vector<Space*> spaces_;
// default Space for allocations
- static AllocSpace* alloc_space_;
+ AllocSpace* alloc_space_;
- static HeapBitmap* mark_bitmap_;
+ HeapBitmap* mark_bitmap_;
- static HeapBitmap* live_bitmap_;
+ HeapBitmap* live_bitmap_;
- static CardTable* card_table_;
+ CardTable* card_table_;
// Used by the image writer to disable card marking on copied objects
// TODO: remove
- static bool card_marking_disabled_;
+ bool card_marking_disabled_;
// True while the garbage collector is running.
- static bool is_gc_running_;
+ bool is_gc_running_;
- // Number of bytes allocated. Adjusted after each allocation and
- // free.
- static size_t num_bytes_allocated_;
+ // Number of bytes allocated. Adjusted after each allocation and free.
+ size_t num_bytes_allocated_;
- // Number of objects allocated. Adjusted after each allocation and
- // free.
- static size_t num_objects_allocated_;
+ // Number of objects allocated. Adjusted after each allocation and free.
+ size_t num_objects_allocated_;
- static Class* java_lang_ref_FinalizerReference_;
- static Class* java_lang_ref_ReferenceQueue_;
+ Class* java_lang_ref_FinalizerReference_;
+ Class* java_lang_ref_ReferenceQueue_;
// offset of java.lang.ref.Reference.referent
- static MemberOffset reference_referent_offset_;
+ MemberOffset reference_referent_offset_;
// offset of java.lang.ref.Reference.queue
- static MemberOffset reference_queue_offset_;
+ MemberOffset reference_queue_offset_;
// offset of java.lang.ref.Reference.queueNext
- static MemberOffset reference_queueNext_offset_;
+ MemberOffset reference_queueNext_offset_;
// offset of java.lang.ref.Reference.pendingNext
- static MemberOffset reference_pendingNext_offset_;
+ MemberOffset reference_pendingNext_offset_;
// offset of java.lang.ref.FinalizerReference.zombie
- static MemberOffset finalizer_reference_zombie_offset_;
+ MemberOffset finalizer_reference_zombie_offset_;
// Target ideal heap utilization ratio
- static float target_utilization_;
+ float target_utilization_;
- static bool verify_objects_;
+ bool verify_objects_;
FRIEND_TEST(SpaceTest, AllocAndFree);
FRIEND_TEST(SpaceTest, AllocAndFreeList);
@@ -296,20 +294,6 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};
-class ScopedHeapLock {
- public:
- ScopedHeapLock() {
- Heap::Lock();
- }
-
- ~ScopedHeapLock() {
- Heap::Unlock();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ScopedHeapLock);
-};
-
} // namespace art
#endif // ART_SRC_HEAP_H_
diff --git a/src/heap_test.cc b/src/heap_test.cc
index 2035ce8..0077a77 100644
--- a/src/heap_test.cc
+++ b/src/heap_test.cc
@@ -21,11 +21,12 @@
class HeapTest : public CommonTest {};
TEST_F(HeapTest, ClearGrowthLimit) {
- int64_t max_memory_before = Heap::GetMaxMemory();
- int64_t total_memory_before = Heap::GetTotalMemory();
- Heap::ClearGrowthLimit();
- int64_t max_memory_after = Heap::GetMaxMemory();
- int64_t total_memory_after = Heap::GetTotalMemory();
+ Heap* heap = Runtime::Current()->GetHeap();
+ int64_t max_memory_before = heap->GetMaxMemory();
+ int64_t total_memory_before = heap->GetTotalMemory();
+ heap->ClearGrowthLimit();
+ int64_t max_memory_after = heap->GetMaxMemory();
+ int64_t total_memory_after = heap->GetTotalMemory();
EXPECT_GE(max_memory_after, max_memory_before);
EXPECT_GE(total_memory_after, total_memory_before);
}
@@ -41,7 +42,7 @@
}
}
- Heap::CollectGarbage(false);
+ Runtime::Current()->GetHeap()->CollectGarbage(false);
}
} // namespace art
diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc
index 4d5cae1..6de9c64 100644
--- a/src/hprof/hprof.cc
+++ b/src/hprof/hprof.cc
@@ -30,6 +30,7 @@
#include "logging.h"
#include "object.h"
#include "object_utils.h"
+#include "scoped_heap_lock.h"
#include "stringprintf.h"
#include <cutils/open_memstream.h>
@@ -763,9 +764,10 @@
ThreadList* thread_list = Runtime::Current()->GetThreadList();
thread_list->SuspendAll();
+ Runtime* runtime = Runtime::Current();
Hprof hprof(fileName, fd, false, directToDdms);
- Runtime::Current()->VisitRoots(HprofRootVisitor, &hprof);
- Heap::GetLiveBits()->Walk(HprofBitmapCallback, &hprof);
+ runtime->VisitRoots(HprofRootVisitor, &hprof);
+ runtime->GetHeap()->GetLiveBits()->Walk(HprofBitmapCallback, &hprof);
// TODO: write a HEAP_SUMMARY record
int success = hprof.Finish() ? 0 : -1;
thread_list->ResumeAll();
diff --git a/src/image_test.cc b/src/image_test.cc
index fb87a49..e886e49 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -38,7 +38,7 @@
ASSERT_TRUE(success_oat);
// Force all system classes into memory
- for (size_t i = 0; i < java_lang_dex_file_->NumClassDefs(); i++) {
+ for (size_t i = 0; i < java_lang_dex_file_->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = java_lang_dex_file_->GetClassDef(i);
const char* descriptor = java_lang_dex_file_->GetClassDescriptor(class_def);
Class* klass = class_linker_->FindSystemClass(descriptor);
@@ -59,8 +59,9 @@
file->ReadFully(&image_header, sizeof(image_header));
ASSERT_TRUE(image_header.IsValid());
- ASSERT_EQ(1U, Heap::GetSpaces().size());
- Space* space = Heap::GetSpaces()[0];
+ Heap* heap = Runtime::Current()->GetHeap();
+ ASSERT_EQ(1U, heap->GetSpaces().size());
+ Space* space = heap->GetSpaces()[0];
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != NULL);
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->Length()));
@@ -84,15 +85,16 @@
ASSERT_TRUE(runtime_->GetJniDlsymLookupStub() != NULL);
- ASSERT_EQ(2U, Heap::GetSpaces().size());
- ASSERT_TRUE(Heap::GetSpaces()[0]->IsImageSpace());
- ASSERT_FALSE(Heap::GetSpaces()[1]->IsImageSpace());
+ Heap* heap = Runtime::Current()->GetHeap();
+ ASSERT_EQ(2U, heap->GetSpaces().size());
+ ASSERT_TRUE(heap->GetSpaces()[0]->IsImageSpace());
+ ASSERT_FALSE(heap->GetSpaces()[1]->IsImageSpace());
- ImageSpace* image_space = Heap::GetSpaces()[0]->AsImageSpace();
+ ImageSpace* image_space = heap->GetSpaces()[0]->AsImageSpace();
byte* image_begin = image_space->Begin();
byte* image_end = image_space->End();
CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
- for (size_t i = 0; i < dex->NumClassDefs(); i++) {
+ for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex->GetClassDef(i);
const char* descriptor = dex->GetClassDescriptor(class_def);
Class* klass = class_linker_->FindSystemClass(descriptor);
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 23c2029..4579c0c 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -39,8 +39,6 @@
namespace art {
-std::map<const Object*, size_t> ImageWriter::offsets_;
-
bool ImageWriter::Write(const std::string& image_filename,
uintptr_t image_begin,
const std::string& oat_filename,
@@ -50,7 +48,8 @@
CHECK_NE(image_begin, 0U);
image_begin_ = reinterpret_cast<byte*>(image_begin);
- const std::vector<Space*>& spaces = Heap::GetSpaces();
+ Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<Space*>& spaces = heap->GetSpaces();
// currently just write the last space, assuming it is the space that was being used for allocation
CHECK_GE(spaces.size(), 1U);
source_space_ = spaces[spaces.size()-1];
@@ -74,15 +73,15 @@
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
ComputeEagerResolvedStrings();
- Heap::CollectGarbage(false); // Remove garbage
- Heap::GetAllocSpace()->Trim(); // Trim size of source_space
+ heap->CollectGarbage(false); // Remove garbage
+ heap->GetAllocSpace()->Trim(); // Trim size of source_space
if (!AllocMemory()) {
return false;
}
#ifndef NDEBUG
CheckNonImageClassesRemoved();
#endif
- Heap::DisableCardMarking();
+ heap->DisableCardMarking();
CalculateNewObjectOffsets();
CopyAndFixupObjects();
@@ -147,7 +146,7 @@
}
void ImageWriter::ComputeEagerResolvedStrings() {
- HeapBitmap* heap_bitmap = Heap::GetLiveBits();
+ HeapBitmap* heap_bitmap = Runtime::Current()->GetHeap()->GetLiveBits();
DCHECK(heap_bitmap != NULL);
heap_bitmap->Walk(ComputeEagerResolvedStringsCallback, this); // TODO: add Space-limited Walk
}
@@ -228,7 +227,7 @@
if (image_classes_ == NULL) {
return;
}
- Heap::GetLiveBits()->Walk(CheckNonImageClassesRemovedCallback, this);
+ Runtime::Current()->GetHeap()->GetLiveBits()->Walk(CheckNonImageClassesRemovedCallback, this);
}
void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
@@ -262,18 +261,18 @@
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
- if (IsImageOffsetAssigned(obj)) {
+ if (image_writer->IsImageOffsetAssigned(obj)) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
SirtRef<String> interned(obj->AsString()->Intern());
if (obj != interned.get()) {
- if (!IsImageOffsetAssigned(interned.get())) {
+ if (!image_writer->IsImageOffsetAssigned(interned.get())) {
// interned obj is after us, allocate its location early
image_writer->AssignImageOffset(interned.get());
}
// point those looking for this object to the interned version.
- SetImageOffset(obj, GetImageOffset(interned.get()));
+ image_writer->SetImageOffset(obj, image_writer->GetImageOffset(interned.get()));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
@@ -328,7 +327,7 @@
void ImageWriter::CalculateNewObjectOffsets() {
SirtRef<ObjectArray<Object> > image_roots(CreateImageRoots());
- HeapBitmap* heap_bitmap = Heap::GetLiveBits();
+ HeapBitmap* heap_bitmap = Runtime::Current()->GetHeap()->GetLiveBits();
DCHECK(heap_bitmap != NULL);
DCHECK_EQ(0U, image_end_);
@@ -353,10 +352,11 @@
}
void ImageWriter::CopyAndFixupObjects() {
- HeapBitmap* heap_bitmap = Heap::GetLiveBits();
+ Heap* heap = Runtime::Current()->GetHeap();
+ HeapBitmap* heap_bitmap = heap->GetLiveBits();
DCHECK(heap_bitmap != NULL);
// TODO: heap validation can't handle this fix up pass
- Heap::DisableObjectValidation();
+ heap->DisableObjectValidation();
heap_bitmap->Walk(CopyAndFixupObjectsCallback, this); // TODO: add Space-limited Walk
}
diff --git a/src/image_writer.h b/src/image_writer.h
index 9d46c06..35a3c02 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -51,8 +51,6 @@
bool AllocMemory();
- static std::map<const Object*, size_t> offsets_;
-
// we use the lock word to store the offset of the object in the image
void AssignImageOffset(Object* object) {
DCHECK(object != NULL);
@@ -60,26 +58,30 @@
image_end_ += RoundUp(object->SizeOf(), 8); // 64-bit alignment
DCHECK_LT(image_end_, image_->Size());
}
- static void SetImageOffset(Object* object, size_t offset) {
+
+ void SetImageOffset(Object* object, size_t offset) {
DCHECK(object != NULL);
DCHECK_NE(offset, 0U);
DCHECK(!IsImageOffsetAssigned(object));
offsets_[object] = offset;
}
- static size_t IsImageOffsetAssigned(const Object* object) {
+
+ size_t IsImageOffsetAssigned(const Object* object) const {
DCHECK(object != NULL);
return offsets_.find(object) != offsets_.end();
}
- static size_t GetImageOffset(const Object* object) {
+
+ size_t GetImageOffset(const Object* object) const {
DCHECK(object != NULL);
DCHECK(IsImageOffsetAssigned(object));
- return offsets_[object];
+ return offsets_.find(object)->second;
}
bool InSourceSpace(const Object* object) const {
DCHECK(source_space_ != NULL);
return source_space_->Contains(object);
}
+
Object* GetImageAddress(const Object* object) const {
if (object == NULL) {
return NULL;
@@ -90,6 +92,7 @@
}
return reinterpret_cast<Object*>(image_begin_ + GetImageOffset(object));
}
+
Object* GetLocalAddress(const Object* object) const {
size_t offset = GetImageOffset(object);
byte* dst = image_->Begin() + offset;
@@ -134,6 +137,8 @@
void FixupStaticFields(const Class* orig, Class* copy);
void FixupFields(const Object* orig, Object* copy, uint32_t ref_offsets, bool is_static);
+ std::map<const Object*, size_t> offsets_;
+
// oat file with code for this image
UniquePtr<OatFile> oat_file_;
diff --git a/src/indirect_reference_table.h b/src/indirect_reference_table.h
index f6cab95..78617f8 100644
--- a/src/indirect_reference_table.h
+++ b/src/indirect_reference_table.h
@@ -96,7 +96,7 @@
*/
typedef void* IndirectRef;
-/* Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress(). */
+// Magic failure values; must not pass Heap::ValidateObject() or Heap::IsHeapAddress().
static Object* const kInvalidIndirectRefObject = reinterpret_cast<Object*>(0xdead4321);
static Object* const kClearedJniWeakGlobal = reinterpret_cast<Object*>(0xdead1234);
@@ -327,6 +327,7 @@
static Offset SegmentStateOffset() {
return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
}
+
private:
/*
* Extract the table index from an indirect reference.
diff --git a/src/java_lang_Runtime.cc b/src/java_lang_Runtime.cc
index 96337af..07af162 100644
--- a/src/java_lang_Runtime.cc
+++ b/src/java_lang_Runtime.cc
@@ -31,7 +31,7 @@
void Runtime_gc(JNIEnv*, jclass) {
ScopedThreadStateChange tsc(Thread::Current(), Thread::kRunnable);
- Heap::CollectGarbage(false);
+ Runtime::Current()->GetHeap()->CollectGarbage(false);
}
void Runtime_nativeExit(JNIEnv* env, jclass, jint status, jboolean isExit) {
@@ -67,15 +67,15 @@
}
jlong Runtime_maxMemory(JNIEnv* env, jclass) {
- return Heap::GetMaxMemory();
+ return Runtime::Current()->GetHeap()->GetMaxMemory();
}
jlong Runtime_totalMemory(JNIEnv* env, jclass) {
- return Heap::GetTotalMemory();
+ return Runtime::Current()->GetHeap()->GetTotalMemory();
}
jlong Runtime_freeMemory(JNIEnv* env, jclass) {
- return Heap::GetFreeMemory();
+ return Runtime::Current()->GetHeap()->GetFreeMemory();
}
static JNINativeMethod gMethods[] = {
diff --git a/src/java_lang_System.cc b/src/java_lang_System.cc
index b48cee9..22a9cc6 100644
--- a/src/java_lang_System.cc
+++ b/src/java_lang_System.cc
@@ -192,7 +192,7 @@
// Yes. Bulk copy.
COMPILE_ASSERT(sizeof(width) == sizeof(uint32_t), move32_assumes_Object_references_are_32_bit);
move32(dstBytes + dstPos * width, srcBytes + srcPos * width, length * width);
- Heap::WriteBarrierArray(dstArray, dstPos, length);
+ Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length);
return;
}
@@ -233,7 +233,7 @@
}
}
- Heap::WriteBarrierArray(dstArray, dstPos, length);
+ Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length);
if (i != length) {
std::string actualSrcType(PrettyTypeOf(o));
std::string dstType(PrettyTypeOf(dstArray));
diff --git a/src/logging.cc b/src/logging.cc
index a4fa45b..18fe99b 100644
--- a/src/logging.cc
+++ b/src/logging.cc
@@ -24,9 +24,9 @@
LogVerbosity gLogVerbosity;
-Mutex& GetLoggingLock() {
- static Mutex lock("LogMessage lock");
- return lock;
+static Mutex& GetLoggingLock() {
+ static Mutex logging_lock("LogMessage lock");
+ return logging_lock;
}
LogMessage::~LogMessage() {
diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc
index 952bf85..e5e0835 100644
--- a/src/mark_sweep.cc
+++ b/src/mark_sweep.cc
@@ -38,8 +38,10 @@
void MarkSweep::Init() {
mark_stack_ = MarkStack::Create();
- mark_bitmap_ = Heap::GetMarkBits();
- live_bitmap_ = Heap::GetLiveBits();
+
+ heap_ = Runtime::Current()->GetHeap();
+ mark_bitmap_ = heap_->GetMarkBits();
+ live_bitmap_ = heap_->GetLiveBits();
// TODO: if concurrent, clear the card table.
@@ -100,8 +102,8 @@
// Marks all objects that are in images and have been touched by the mutator
void MarkSweep::ScanDirtyImageRoots() {
- const std::vector<Space*>& spaces = Heap::GetSpaces();
- CardTable* card_table = Heap::GetCardTable();
+ const std::vector<Space*>& spaces = heap_->GetSpaces();
+ CardTable* card_table = heap_->GetCardTable();
for (size_t i = 0; i < spaces.size(); ++i) {
if (spaces[i]->IsImageSpace()) {
byte* begin = spaces[i]->Begin();
@@ -135,7 +137,7 @@
CHECK(cleared_reference_list_ == NULL);
void* arg = reinterpret_cast<void*>(this);
- const std::vector<Space*>& spaces = Heap::GetSpaces();
+ const std::vector<Space*>& spaces = heap_->GetSpaces();
for (size_t i = 0; i < spaces.size(); ++i) {
#ifndef NDEBUG
uintptr_t begin = reinterpret_cast<uintptr_t>(spaces[i]->Begin());
@@ -181,11 +183,18 @@
SweepJniWeakGlobals();
}
+struct SweepCallbackContext {
+ Heap* heap;
+ AllocSpace* space;
+};
+
void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
// TODO: lock heap if concurrent
size_t freed_objects = num_ptrs;
size_t freed_bytes = 0;
- AllocSpace* space = static_cast<AllocSpace*>(arg);
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ Heap* heap = context->heap;
+ AllocSpace* space = context->space;
// Use a bulk free, that merges consecutive objects before freeing or free per object?
// Documentation suggests better free performance with merging, but this may be at the expensive
// of allocation.
@@ -195,7 +204,7 @@
for (size_t i = 0; i < num_ptrs; ++i) {
Object* obj = static_cast<Object*>(ptrs[i]);
freed_bytes += space->AllocationSize(obj);
- Heap::GetLiveBits()->Clear(obj);
+ heap->GetLiveBits()->Clear(obj);
}
// AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
space->FreeList(num_ptrs, ptrs);
@@ -203,25 +212,27 @@
for (size_t i = 0; i < num_ptrs; ++i) {
Object* obj = static_cast<Object*>(ptrs[i]);
freed_bytes += space->AllocationSize(obj);
- Heap::GetLiveBits()->Clear(obj);
+ heap->GetLiveBits()->Clear(obj);
space->Free(obj);
}
}
- Heap::RecordFreeLocked(freed_objects, freed_bytes);
+ heap->RecordFreeLocked(freed_objects, freed_bytes);
// TODO: unlock heap if concurrent
}
void MarkSweep::Sweep() {
SweepSystemWeaks();
- const std::vector<Space*>& spaces = Heap::GetSpaces();
+ const std::vector<Space*>& spaces = heap_->GetSpaces();
+ SweepCallbackContext scc;
+ scc.heap = heap_;
for (size_t i = 0; i < spaces.size(); ++i) {
if (!spaces[i]->IsImageSpace()) {
uintptr_t begin = reinterpret_cast<uintptr_t>(spaces[i]->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(spaces[i]->End());
- void* arg = static_cast<void*>(spaces[i]);
+ scc.space = spaces[i]->AsAllocSpace();
HeapBitmap::SweepWalk(*live_bitmap_, *mark_bitmap_, begin, end,
- &MarkSweep::SweepCallback, arg);
+ &MarkSweep::SweepCallback, reinterpret_cast<void*>(&scc));
}
}
}
@@ -283,7 +294,7 @@
}
inline void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
- AllocSpace* alloc_space = Heap::GetAllocSpace();
+ AllocSpace* alloc_space = heap_->GetAllocSpace();
if (alloc_space->Contains(ref)) {
bool is_marked = mark_bitmap_->Test(ref);
if(!is_marked) {
@@ -292,7 +303,7 @@
<< "' (" << (void*)ref << ") in '" << PrettyTypeOf(obj)
<< "' (" << (void*)obj << ") at offset "
<< (void*)offset.Int32Value() << " wasn't marked";
- bool obj_marked = Heap::GetCardTable()->IsDirty(obj);
+ bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
if (!obj_marked) {
LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' (" << (void*)obj
<< ") contains references to the alloc space, but wasn't card marked";
@@ -386,8 +397,8 @@
Class* klass = obj->GetClass();
DCHECK(klass != NULL);
DCHECK(klass->IsReferenceClass());
- Object* pending = obj->GetFieldObject<Object*>(Heap::GetReferencePendingNextOffset(), false);
- Object* referent = Heap::GetReferenceReferent(obj);
+ Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
+ Object* referent = heap_->GetReferenceReferent(obj);
if (pending == NULL && referent != NULL && !IsMarked(referent)) {
Object** list = NULL;
if (klass->IsSoftReferenceClass()) {
@@ -400,7 +411,7 @@
list = &phantom_reference_list_;
}
DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
- Heap::EnqueuePendingReference(obj, list);
+ heap_->EnqueuePendingReference(obj, list);
}
}
@@ -452,7 +463,7 @@
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
- Space* alloc_space = Heap::GetAllocSpace();
+ Space* alloc_space = heap_->GetAllocSpace();
while (!mark_stack_->IsEmpty()) {
const Object* obj = mark_stack_->Pop();
if (alloc_space->Contains(obj)) {
@@ -474,8 +485,8 @@
Object* clear = NULL;
size_t counter = 0;
while (*list != NULL) {
- Object* ref = Heap::DequeuePendingReference(list);
- Object* referent = Heap::GetReferenceReferent(ref);
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
if (referent == NULL) {
// Referent was cleared by the user during marking.
continue;
@@ -488,7 +499,7 @@
}
if (!is_marked) {
// Referent is white, queue it for clearing.
- Heap::EnqueuePendingReference(ref, &clear);
+ heap_->EnqueuePendingReference(ref, &clear);
}
}
*list = clear;
@@ -503,13 +514,13 @@
void MarkSweep::ClearWhiteReferences(Object** list) {
DCHECK(list != NULL);
while (*list != NULL) {
- Object* ref = Heap::DequeuePendingReference(list);
- Object* referent = Heap::GetReferenceReferent(ref);
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
if (referent != NULL && !IsMarked(referent)) {
// Referent is white, clear it.
- Heap::ClearReferenceReferent(ref);
- if (Heap::IsEnqueuable(ref)) {
- Heap::EnqueueReference(ref, &cleared_reference_list_);
+ heap_->ClearReferenceReferent(ref);
+ if (heap_->IsEnqueuable(ref)) {
+ heap_->EnqueueReference(ref, &cleared_reference_list_);
}
}
}
@@ -521,18 +532,18 @@
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
DCHECK(list != NULL);
- MemberOffset zombie_offset = Heap::GetFinalizerReferenceZombieOffset();
+ MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
bool has_enqueued = false;
while (*list != NULL) {
- Object* ref = Heap::DequeuePendingReference(list);
- Object* referent = Heap::GetReferenceReferent(ref);
+ Object* ref = heap_->DequeuePendingReference(list);
+ Object* referent = heap_->GetReferenceReferent(ref);
if (referent != NULL && !IsMarked(referent)) {
MarkObject(referent);
// If the referent is non-null the reference must queuable.
- DCHECK(Heap::IsEnqueuable(ref));
+ DCHECK(heap_->IsEnqueuable(ref));
ref->SetFieldObject(zombie_offset, referent, false);
- Heap::ClearReferenceReferent(ref);
- Heap::EnqueueReference(ref, &cleared_reference_list_);
+ heap_->ClearReferenceReferent(ref);
+ heap_->EnqueueReference(ref, &cleared_reference_list_);
has_enqueued = true;
}
}
diff --git a/src/mark_sweep.h b/src/mark_sweep.h
index c675675..dce4b52 100644
--- a/src/mark_sweep.h
+++ b/src/mark_sweep.h
@@ -25,11 +25,16 @@
namespace art {
class Class;
+class Heap;
class Object;
class MarkSweep {
public:
MarkSweep() :
+ mark_stack_(NULL),
+ heap_(NULL),
+ mark_bitmap_(NULL),
+ live_bitmap_(NULL),
finger_(NULL),
condemned_(NULL),
soft_reference_list_(NULL),
@@ -162,8 +167,8 @@
MarkStack* mark_stack_;
+ Heap* heap_;
HeapBitmap* mark_bitmap_;
-
HeapBitmap* live_bitmap_;
Object* finger_;
diff --git a/src/mutex.cc b/src/mutex.cc
index f7c3143..340a075 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -61,6 +61,7 @@
int rc = pthread_mutex_destroy(&mutex_);
if (rc != 0) {
errno = rc;
+ // TODO: should we just not log at all if shutting down? this could be the logging mutex!
bool shutting_down = Runtime::Current()->IsShuttingDown();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
}
diff --git a/src/oat_writer.cc b/src/oat_writer.cc
index 5f97927..dcdff9a 100644
--- a/src/oat_writer.cc
+++ b/src/oat_writer.cc
@@ -277,7 +277,7 @@
Class::Status status =
(compiled_class != NULL) ? compiled_class->GetStatus() : Class::kStatusNotReady;
CHECK(gc_map_size != 0 || is_native || status < Class::kStatusVerified)
- << PrettyMethod(method_idx, *dex_file);
+ << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " " << (status < Class::kStatusVerified) << " " << status << " " << PrettyMethod(method_idx, *dex_file);
#endif
// Deduplicate GC maps
diff --git a/src/oatdump.cc b/src/oatdump.cc
index 4b492aa..5574bb7 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -543,7 +543,7 @@
oat_dumper_.reset(new OatDumper(*oat_file));
os_ << "OBJECTS:\n" << std::flush;
- HeapBitmap* heap_bitmap = Heap::GetLiveBits();
+ HeapBitmap* heap_bitmap = Runtime::Current()->GetHeap()->GetLiveBits();
DCHECK(heap_bitmap != NULL);
heap_bitmap->Walk(ImageDumper::Callback, this);
os_ << "\n";
@@ -1125,7 +1125,8 @@
return EXIT_FAILURE;
}
- ImageSpace* image_space = Heap::GetSpaces()[Heap::GetSpaces().size()-2]->AsImageSpace();
+ Heap* heap = Runtime::Current()->GetHeap();
+ ImageSpace* image_space = heap->GetSpaces()[heap->GetSpaces().size()-2]->AsImageSpace();
CHECK(image_space != NULL);
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
diff --git a/src/object.cc b/src/object.cc
index 0f5f1ec..2e98f4f 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -57,7 +57,8 @@
// Object::SizeOf gets the right size even if we're an array.
// Using c->AllocObject() here would be wrong.
size_t num_bytes = SizeOf();
- SirtRef<Object> copy(Heap::AllocObject(c, num_bytes));
+ Heap* heap = Runtime::Current()->GetHeap();
+ SirtRef<Object> copy(heap->AllocObject(c, num_bytes));
if (copy.get() == NULL) {
return NULL;
}
@@ -70,7 +71,7 @@
memcpy(dst_bytes + offset, src_bytes + offset, num_bytes - offset);
if (c->IsFinalizable()) {
- Heap::AddFinalizerReference(Thread::Current(), copy.get());
+ heap->AddFinalizerReference(Thread::Current(), copy.get());
}
return copy.get();
@@ -676,7 +677,7 @@
// TODO: decide whether we want this check. It currently fails during bootstrap.
// DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
DCHECK_GE(this->object_size_, sizeof(Object));
- return Heap::AllocObject(this, this->object_size_);
+ return Runtime::Current()->GetHeap()->AllocObject(this, this->object_size_);
}
void Class::SetClassSize(size_t new_class_size) {
@@ -1251,7 +1252,8 @@
return NULL;
}
- Array* array = down_cast<Array*>(Heap::AllocObject(array_class, size));
+ Heap* heap = Runtime::Current()->GetHeap();
+ Array* array = down_cast<Array*>(heap->AllocObject(array_class, size));
if (array != NULL) {
DCHECK(array->IsArrayInstance());
array->SetLength(component_count);
diff --git a/src/object.h b/src/object.h
index 89b9df4..b3a4805 100644
--- a/src/object.h
+++ b/src/object.h
@@ -279,20 +279,20 @@
T GetFieldObject(MemberOffset field_offset, bool is_volatile) const {
DCHECK(Thread::Current() == NULL || Thread::Current()->CanAccessDirectReferences());
T result = reinterpret_cast<T>(GetField32(field_offset, is_volatile));
- Heap::VerifyObject(result);
+ Runtime::Current()->GetHeap()->VerifyObject(result);
return result;
}
void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile, bool this_is_valid = true) {
- Heap::VerifyObject(new_value);
+ Runtime::Current()->GetHeap()->VerifyObject(new_value);
SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
if (new_value != NULL) {
- Heap::WriteBarrierField(this, field_offset, new_value);
+ Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
}
}
uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const {
- Heap::VerifyObject(this);
+ Runtime::Current()->GetHeap()->VerifyObject(this);
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
if (UNLIKELY(is_volatile)) {
@@ -304,7 +304,7 @@
void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile, bool this_is_valid = true) {
if (this_is_valid) {
- Heap::VerifyObject(this);
+ Runtime::Current()->GetHeap()->VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
uint32_t* word_addr = reinterpret_cast<uint32_t*>(raw_addr);
@@ -323,7 +323,7 @@
}
uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const {
- Heap::VerifyObject(this);
+ Runtime::Current()->GetHeap()->VerifyObject(this);
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
if (UNLIKELY(is_volatile)) {
@@ -336,7 +336,7 @@
}
void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile) {
- Heap::VerifyObject(this);
+ Runtime::Current()->GetHeap()->VerifyObject(this);
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
if (UNLIKELY(is_volatile)) {
@@ -2035,11 +2035,12 @@
MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*));
MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*));
Class* array_class = dst->GetClass();
+ Heap* heap = Runtime::Current()->GetHeap();
if (array_class == src->GetClass()) {
// No need for array store checks if arrays are of the same type
for (size_t i = 0; i < length; i++) {
Object* object = src->GetFieldObject<Object*>(src_offset, false);
- Heap::VerifyObject(object);
+ heap->VerifyObject(object);
// directly set field, we do a bulk write barrier at the end
dst->SetField32(dst_offset, reinterpret_cast<uint32_t>(object), false, true);
src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*));
@@ -2054,14 +2055,14 @@
dst->ThrowArrayStoreException(object);
return;
}
- Heap::VerifyObject(object);
+ heap->VerifyObject(object);
// directly set field, we do a bulk write barrier at the end
dst->SetField32(dst_offset, reinterpret_cast<uint32_t>(object), false, true);
src_offset = MemberOffset(src_offset.Uint32Value() + sizeof(Object*));
dst_offset = MemberOffset(dst_offset.Uint32Value() + sizeof(Object*));
}
}
- Heap::WriteBarrierArray(dst, dst_pos, length);
+ heap->WriteBarrierArray(dst, dst_pos, length);
}
}
diff --git a/src/object_test.cc b/src/object_test.cc
index b431d0a..3abe702 100644
--- a/src/object_test.cc
+++ b/src/object_test.cc
@@ -192,7 +192,7 @@
TEST_F(ObjectTest, StaticFieldFromCode) {
// pretend we are trying to access 'Static.s0' from StaticsFromCode.<clinit>
SirtRef<ClassLoader> class_loader(LoadDex("StaticsFromCode"));
- const DexFile* dex_file = ClassLoader::GetCompileTimeClassPath(class_loader.get())[0];
+ const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader.get())[0];
CHECK(dex_file != NULL);
Class* klass = class_linker_->FindClass("LStaticsFromCode;", class_loader.get());
diff --git a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 51983ea..c5db4f6 100644
--- a/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/src/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -16,6 +16,7 @@
#include "debugger.h"
#include "logging.h"
+#include "scoped_heap_lock.h"
#include "stack.h"
#include "thread_list.h"
diff --git a/src/runtime.cc b/src/runtime.cc
index 22f4329..c53ede1 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -47,12 +47,11 @@
Runtime* Runtime::instance_ = NULL;
-Mutex Runtime::abort_lock_("abort lock");
-
Runtime::Runtime()
: is_compiler_(false),
is_zygote_(false),
default_stack_size_(Thread::kDefaultStackSize),
+ heap_(NULL),
monitor_list_(NULL),
thread_list_(NULL),
intern_table_(NULL),
@@ -69,7 +68,8 @@
exit_(NULL),
abort_(NULL),
stats_enabled_(false),
- tracer_(NULL) {
+ tracer_(NULL),
+ use_compile_time_class_path_(false) {
for (int i = 0; i < Runtime::kLastTrampolineMethodType; i++) {
resolution_stub_array_[i] = NULL;
}
@@ -92,7 +92,7 @@
delete monitor_list_;
delete class_linker_;
- Heap::Destroy();
+ delete heap_;
verifier::DexVerifier::DeleteGcMaps();
delete intern_table_;
delete java_vm_;
@@ -129,10 +129,15 @@
}
};
+static Mutex& GetAbortLock() {
+ static Mutex abort_lock("abort lock");
+ return abort_lock;
+}
+
void Runtime::Abort(const char* file, int line) {
// Ensure that we don't have multiple threads trying to abort at once,
// which would result in significantly worse diagnostics.
- MutexLock mu(abort_lock_);
+ MutexLock mu(GetAbortLock());
// Get any pending output out of the way.
fflush(NULL);
@@ -294,6 +299,17 @@
parsed->hook_exit_ = exit;
parsed->hook_abort_ = abort;
+// gLogVerbosity.class_linker = true; // TODO: don't check this in!
+// gLogVerbosity.compiler = true; // TODO: don't check this in!
+// gLogVerbosity.heap = true; // TODO: don't check this in!
+// gLogVerbosity.gc = true; // TODO: don't check this in!
+// gLogVerbosity.jdwp = true; // TODO: don't check this in!
+// gLogVerbosity.jni = true; // TODO: don't check this in!
+// gLogVerbosity.monitor = true; // TODO: don't check this in!
+// gLogVerbosity.startup = true; // TODO: don't check this in!
+// gLogVerbosity.third_party_jni = true; // TODO: don't check this in!
+// gLogVerbosity.threads = true; // TODO: don't check this in!
+
for (size_t i = 0; i < options.size(); ++i) {
const std::string option(options[i].first);
if (true && options[0].first == "-Xzygote") {
@@ -469,7 +485,7 @@
}
void CreateSystemClassLoader() {
- if (ClassLoader::UseCompileTimeClassPath()) {
+ if (Runtime::Current()->UseCompileTimeClassPath()) {
return;
}
@@ -605,10 +621,12 @@
thread_list_ = new ThreadList;
intern_table_ = new InternTable;
- Heap::Init(options->heap_initial_size_,
- options->heap_growth_limit_,
- options->heap_maximum_size_,
- options->image_);
+ verifier::DexVerifier::InitGcMaps();
+
+ heap_ = new Heap(options->heap_initial_size_,
+ options->heap_growth_limit_,
+ options->heap_maximum_size_,
+ options->image_);
BlockSignals();
@@ -623,8 +641,8 @@
// Set us to runnable so tools using a runtime can allocate and GC by default
Thread::Current()->SetState(Thread::kRunnable);
- CHECK_GE(Heap::GetSpaces().size(), 1U);
- if (Heap::GetSpaces()[0]->IsImageSpace()) {
+ CHECK_GE(GetHeap()->GetSpaces().size(), 1U);
+ if (GetHeap()->GetSpaces()[0]->IsImageSpace()) {
class_linker_ = ClassLinker::CreateFromImage(intern_table_);
} else {
CHECK(options->boot_class_path_ != NULL);
@@ -697,7 +715,7 @@
}
void Runtime::DumpLockHolders(std::ostream& os) {
- pid_t heap_lock_owner = Heap::GetLockOwner();
+ pid_t heap_lock_owner = GetHeap()->GetLockOwner();
pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
@@ -971,4 +989,20 @@
return tracer_;
}
+const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(const ClassLoader* class_loader) {
+ if (class_loader == NULL) {
+ return GetClassLinker()->GetBootClassPath();
+ }
+ CHECK(UseCompileTimeClassPath());
+ CompileTimeClassPaths::const_iterator it = compile_time_class_paths_.find(class_loader);
+ CHECK(it != compile_time_class_paths_.end());
+ return it->second;
+}
+
+void Runtime::SetCompileTimeClassPath(const ClassLoader* class_loader, std::vector<const DexFile*>& class_path) {
+ CHECK(!IsStarted());
+ use_compile_time_class_path_ = true;
+ compile_time_class_paths_[class_loader] = class_path;
+}
+
} // namespace art
diff --git a/src/runtime.h b/src/runtime.h
index 36f0084..a2ec08e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -20,6 +20,7 @@
#include <stdio.h>
#include <iosfwd>
+#include <map>
#include <string>
#include <utility>
#include <vector>
@@ -148,6 +149,10 @@
return default_stack_size_;
}
+ Heap* GetHeap() const {
+ return heap_;
+ }
+
InternTable* GetInternTable() const {
return intern_table_;
}
@@ -232,6 +237,12 @@
bool IsMethodTracingActive() const;
Trace* GetTracer() const;
+ bool UseCompileTimeClassPath() const {
+ return use_compile_time_class_path_;
+ }
+ const std::vector<const DexFile*>& GetCompileTimeClassPath(const ClassLoader* class_loader);
+ void SetCompileTimeClassPath(const ClassLoader* class_loader, std::vector<const DexFile*>& class_path);
+
private:
static void PlatformAbort(const char*, int);
@@ -246,6 +257,9 @@
void StartDaemonThreads();
void StartSignalCatcher();
+ // A pointer to the active runtime or NULL.
+ static Runtime* instance_;
+
bool is_compiler_;
bool is_zygote_;
@@ -268,6 +282,8 @@
// The default stack size for managed threads created by the runtime.
size_t default_stack_size_;
+ Heap* heap_;
+
MonitorList* monitor_list_;
ThreadList* thread_list_;
@@ -307,10 +323,9 @@
Trace* tracer_;
- // A pointer to the active runtime or NULL.
- static Runtime* instance_;
-
- static Mutex abort_lock_;
+ typedef std::map<const ClassLoader*, std::vector<const DexFile*> > CompileTimeClassPaths;
+ CompileTimeClassPaths compile_time_class_paths_;
+ bool use_compile_time_class_path_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/src/runtime_support.cc b/src/runtime_support.cc
index 6531eb2..92b5203 100644
--- a/src/runtime_support.cc
+++ b/src/runtime_support.cc
@@ -512,7 +512,7 @@
intptr_t value = *arg_ptr;
Object** value_as_jni_rep = reinterpret_cast<Object**>(value);
Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
- CHECK(Heap::IsHeapAddress(value_as_work_around_rep));
+ CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep));
*arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
}
diff --git a/src/scoped_heap_lock.h b/src/scoped_heap_lock.h
new file mode 100644
index 0000000..0dee589
--- /dev/null
+++ b/src/scoped_heap_lock.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_SCOPED_HEAP_LOCK_H_
+#define ART_SRC_SCOPED_HEAP_LOCK_H_
+
+#include "heap.h"
+#include "macros.h"
+#include "runtime.h"
+
+namespace art {
+
+class ScopedHeapLock {
+ public:
+ ScopedHeapLock() {
+ Runtime::Current()->GetHeap()->Lock();
+ }
+
+ ~ScopedHeapLock() {
+ Runtime::Current()->GetHeap()->Unlock();
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedHeapLock);
+};
+
+} // namespace art
+
+#endif // ART_SRC_SCOPED_HEAP_LOCK_H_
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index 4230834..bc358ed 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -30,6 +30,7 @@
#include "heap.h"
#include "os.h"
#include "runtime.h"
+#include "scoped_heap_lock.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
@@ -131,7 +132,7 @@
void SignalCatcher::HandleSigUsr1() {
LOG(INFO) << "SIGUSR1 forcing GC (no HPROF)";
- Heap::CollectGarbage(false);
+ Runtime::Current()->GetHeap()->CollectGarbage(false);
}
int SignalCatcher::WaitForSignal(sigset_t& mask) {
diff --git a/src/space.cc b/src/space.cc
index 6d5a31a..78fcdf5 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -176,13 +176,14 @@
// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
- AllocSpace* space = Heap::GetAllocSpace();
+ Heap* heap = Runtime::Current()->GetHeap();
+ AllocSpace* space = heap->GetAllocSpace();
if (LIKELY(space->GetMspace() == mspace)) {
return space->MoreCore(increment);
} else {
// Exhaustively search alloc spaces
- const std::vector<Space*>& spaces = Heap::GetSpaces();
- for (size_t i = 0; i < spaces.size(); i++) {
+ const std::vector<Space*>& spaces = heap->GetSpaces();
+ for (size_t i = 0; i < spaces.size(); ++i) {
if (spaces[i]->IsAllocSpace()) {
AllocSpace* space = spaces[i]->AsAllocSpace();
if (mspace == space->GetMspace()) {
@@ -191,7 +192,7 @@
}
}
LOG(FATAL) << "Unexpected call to art_heap_morecore. mspace: " << mspace
- << " increment: " << increment;
+ << " increment: " << increment;
return NULL;
}
}
diff --git a/src/space_test.cc b/src/space_test.cc
index eb4fa86..16ffb08 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -75,7 +75,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Heap::AddSpace(space);
+ Runtime::Current()->GetHeap()->AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
Object* ptr1 = space->AllocWithoutGrowth(1 * MB);
@@ -117,7 +117,7 @@
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Heap::AddSpace(space);
+ Runtime::Current()->GetHeap()->AddSpace(space);
// Succeeds, fits without adjusting the max allowed footprint.
Object* lots_of_objects[1024];
@@ -309,7 +309,7 @@
EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
// Make space findable to the heap, will also delete space when runtime is cleaned up
- Heap::AddSpace(space);
+ Runtime::Current()->GetHeap()->AddSpace(space);
// In this round we don't allocate with growth and therefore can't grow past the initial size.
// This effectively makes the growth_limit the initial_size, so assert this.
diff --git a/src/stack.cc b/src/stack.cc
index 7da20b0..4623ec1 100644
--- a/src/stack.cc
+++ b/src/stack.cc
@@ -23,10 +23,6 @@
namespace art {
-int oatVRegOffset(const DexFile::CodeItem* code_item,
- uint32_t core_spills, uint32_t fp_spills,
- size_t frame_size, int reg);
-
bool Frame::HasMethod() const {
return GetMethod() != NULL && (!GetMethod()->IsCalleeSaveMethod());
}
@@ -53,9 +49,26 @@
*reinterpret_cast<uintptr_t*>(pc_addr) = pc;
}
+// Returns the SP-relative offset in bytes of Dalvik virtual register 'reg' within this frame.
+static int GetVRegOffset(const DexFile::CodeItem* code_item,
+ uint32_t core_spills, uint32_t fp_spills,
+ size_t frame_size, int reg)
+{
+ static const int kStackAlignWords = kStackAlignment/sizeof(uint32_t);
+ int numIns = code_item->ins_size_;
+ int numRegs = code_item->registers_size_ - numIns;
+ int numOuts = code_item->outs_size_;
+ int numSpills = __builtin_popcount(core_spills) + __builtin_popcount(fp_spills);
+ int numPadding = (kStackAlignWords - (numSpills + numRegs + numOuts + 2)) & (kStackAlignWords - 1);
+ int regsOffset = (numOuts + numPadding + 1) * 4;
+ int insOffset = frame_size + 4;
+ return (reg < numRegs) ? regsOffset + (reg << 2) :
+ insOffset + ((reg - numRegs) << 2);
+}
+
uint32_t Frame::GetVReg(const DexFile::CodeItem* code_item, uint32_t core_spills,
uint32_t fp_spills, size_t frame_size, int vreg) const {
- int offset = oatVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
+ int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
byte* vreg_addr = reinterpret_cast<byte*>(sp_) + offset;
return *reinterpret_cast<uint32_t*>(vreg_addr);
}
@@ -77,7 +90,7 @@
uint32_t core_spills = m->GetCoreSpillMask();
uint32_t fp_spills = m->GetFpSpillMask();
size_t frame_size = m->GetFrameSizeInBytes();
- int offset = oatVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
+ int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg);
byte* vreg_addr = reinterpret_cast<byte*>(sp_) + offset;
*reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
}
diff --git a/src/sun_misc_Unsafe.cc b/src/sun_misc_Unsafe.cc
index 68cd4f7..a561690 100644
--- a/src/sun_misc_Unsafe.cc
+++ b/src/sun_misc_Unsafe.cc
@@ -70,7 +70,7 @@
int result = android_atomic_release_cas(reinterpret_cast<int32_t>(expectedValue),
reinterpret_cast<int32_t>(newValue), address);
if (result == 0) {
- Heap::WriteBarrierField(obj, MemberOffset(offset), newValue);
+ Runtime::Current()->GetHeap()->WriteBarrierField(obj, MemberOffset(offset), newValue);
}
return (result == 0);
}
diff --git a/src/thread.cc b/src/thread.cc
index ee3bf26..ac0922d 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -68,7 +68,7 @@
static Method* gUncaughtExceptionHandler_uncaughtException = NULL;
void Thread::InitCardTable() {
- card_table_ = Heap::GetCardTable()->GetBiasedBegin();
+ card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}
void Thread::InitFunctionPointers() {
@@ -540,7 +540,7 @@
#ifndef NDEBUG
if (sp != NULL) {
Method* m = *sp;
- Heap::VerifyObject(m);
+ Runtime::Current()->GetHeap()->VerifyObject(m);
DCHECK((m == NULL) || m->IsMethod());
}
#endif
@@ -1056,7 +1056,7 @@
JniAbort(NULL);
} else {
if (result != kInvalidIndirectRefObject) {
- Heap::VerifyObject(result);
+ Runtime::Current()->GetHeap()->VerifyObject(result);
}
}
return result;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index e780e2c..b2cf676 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -19,6 +19,7 @@
#include <unistd.h>
#include "debugger.h"
+#include "scoped_heap_lock.h"
namespace art {
@@ -458,8 +459,9 @@
// started, we wait until it's over. Which means that if there's now another GC pending, our
// suspend count is non-zero, so switching to the runnable state will suspend us.
// TODO: find a better solution!
- Heap::Lock();
- Heap::Unlock();
+ {
+ ScopedHeapLock heap_lock;
+ }
self->SetState(Thread::kRunnable);
}