Merge "A few more MIPS fixes. Emulator boots up with black screen." into dalvik-dev
diff --git a/Android.mk b/Android.mk
index 3e17072..162becb 100644
--- a/Android.mk
+++ b/Android.mk
@@ -87,9 +87,6 @@
ifeq ($(ART_USE_LLVM_COMPILER),true)
include $(build_path)/Android.libart-compiler-llvm.mk
endif
-ifeq ($(ART_USE_GREENLAND_COMPILER),true)
-include $(build_path)/Android.libart-compiler-greenland.mk
-endif
include $(build_path)/Android.executable.mk
include $(build_path)/Android.oat.mk
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 178af64..0d31237 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -24,7 +24,6 @@
ART_USE_PORTABLE_COMPILER := true
endif
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
-WITH_ART_USE_QUICK_COMPILER := true
WITH_ART_USE_LLVM_COMPILER := true
endif
@@ -39,48 +38,8 @@
ART_USE_LLVM_COMPILER := true
endif
-ART_USE_DEXLANG_FRONTEND := false
-ifeq ($(ART_USE_LLVM_COMPILER),true)
- ifneq ($(wildcard art/USE_DEXLANG_FRONTEND),)
- $(info Enabling ART_USE_DEXLANG_FRONTEND because of existence of art/USE_DEXLANG_FRONTEND)
- ART_USE_DEXLANG_FRONTEND := true
- endif
- ifeq ($(WITH_ART_USE_DEXLANG_FRONTEND),true)
- $(info Enabling ART_USE_DEXLANG_FRONTEND because WITH_ART_USE_DEXLANG_FRONTEND=true)
- ART_USE_DEXLANG_FRONTEND := true
- endif
-endif
-
-ART_USE_GREENLAND_COMPILER := false
-ifneq ($(wildcard art/USE_GREENLAND_COMPILER),)
-$(info Enabling ART_USE_GREENLAND_COMPILER because of existence of art/USE_GREENLAND_COMPILER)
-ART_USE_GREENLAND_COMPILER := true
-endif
-ifeq ($(WITH_ART_USE_GREENLAND_COMPILER),true)
-$(info Enabling ART_USE_GREENLAND_COMPILER because WITH_ART_USE_GREENLAND_COMPILER=true)
-ART_USE_GREENLAND_COMPILER := true
-endif
-
-ART_USE_QUICK_COMPILER := false
-ifneq ($(wildcard art/USE_QUICK_COMPILER),)
-ART_USE_QUICK_COMPILER := true
-$(info Enabling ART_USE_QUICK_COMPILER because of existence of art/USE_QUICK_COMPILER)
-endif
-ifeq ($(WITH_ART_USE_QUICK_COMPILER),true)
-ART_USE_QUICK_COMPILER := true
-$(info Enabling ART_USE_QUICK_COMPILER because WITH_ART_USE_QUICK_COMPILER=true)
-endif
-
-ifeq ($(words $(filter true,$(ART_USE_LLVM_COMPILER) $(ART_USE_GREENLAND_COMPILER) $(ART_USE_QUICK_COMPILER))),0)
-ART_REQUIRE_LLVM := false
-else #!0
-ART_REQUIRE_LLVM := true
-endif #!0
-
-ifeq ($(ART_REQUIRE_LLVM),true)
LLVM_ROOT_PATH := external/llvm
include $(LLVM_ROOT_PATH)/llvm.mk
-endif
# directory used for gtests on device
ART_NATIVETEST_DIR := /data/nativetest/art
@@ -164,6 +123,7 @@
LIBART_COMMON_SRC_FILES := \
src/atomic.cc.arm \
+ src/barrier.cc \
src/check_jni.cc \
src/class_linker.cc \
src/common_throws.cc \
@@ -181,7 +141,6 @@
src/dlmalloc.cc \
src/file.cc \
src/file_linux.cc \
- src/gc/barrier.cc \
src/gc/card_table.cc \
src/gc/heap_bitmap.cc \
src/gc/large_object_space.cc \
@@ -195,6 +154,7 @@
src/image_writer.cc \
src/indirect_reference_table.cc \
src/intern_table.cc \
+ src/interpreter/interpreter.cc \
src/jdwp/jdwp_event.cc \
src/jdwp/jdwp_expand_buf.cc \
src/jdwp/jdwp_handler.cc \
@@ -257,6 +217,7 @@
src/stringprintf.cc \
src/thread.cc \
src/thread_list.cc \
+ src/thread_pool.cc \
src/trace.cc \
src/utf.cc \
src/utils.cc \
@@ -275,18 +236,6 @@
src/compiler_llvm/runtime_support_llvm.cc
endif
-ifeq ($(ART_USE_GREENLAND_COMPILER),true)
-LIBART_COMMON_SRC_FILES += \
- src/greenland/inferred_reg_category_map.cc \
- src/greenland/runtime_entry_points.cc \
- src/greenland/runtime/support_alloc.cc \
- src/greenland/runtime/support_cast.cc \
- src/greenland/runtime/support_dexcache.cc \
- src/greenland/runtime/support_exception.cc \
- src/greenland/runtime/support_field.cc \
- src/greenland/runtime/support_thread.cc
-endif
-
LIBART_COMMON_SRC_FILES += \
src/oat/runtime/context.cc \
src/oat/runtime/support_alloc.cc \
@@ -394,6 +343,7 @@
test/ReferenceMap/stack_walk_refmap_jni.cc
TEST_COMMON_SRC_FILES := \
+ src/barrier_test.cc \
src/class_linker_test.cc \
src/compiler_test.cc \
src/dex_cache_test.cc \
@@ -418,6 +368,7 @@
src/reference_table_test.cc \
src/runtime_support_test.cc \
src/runtime_test.cc \
+ src/thread_pool_test.cc \
src/utils_test.cc \
src/zip_archive_test.cc \
src/verifier/method_verifier_test.cc \
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index ac1d64e..d06b200 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -20,17 +20,9 @@
ART_EXECUTABLES_CFLAGS :=
ifeq ($(ART_USE_LLVM_COMPILER),true)
ART_EXECUTABLES_CFLAGS += -DART_USE_LLVM_COMPILER=1
- ifeq ($(ART_USE_DEXLANG_FRONTEND),true)
- ART_EXECUTABLES_CFLAGS += -DART_USE_DEXLANG_FRONTEND=1
- endif
endif
-
-ifeq ($(ART_USE_GREENLAND_COMPILER),true)
- ART_EXECUTABLES_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
-endif
-
-ifeq ($(ART_USE_QUICK_COMPILER),true)
- ART_EXECUTABLES_CFLAGS += -DART_USE_QUICK_COMPILER=1
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ ART_EXECUTABLES_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
# $(1): executable ("d" will be appended for debug version)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index de8c502..c3674f2 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -22,17 +22,9 @@
ART_TEST_CFLAGS :=
ifeq ($(ART_USE_LLVM_COMPILER),true)
ART_TEST_CFLAGS += -DART_USE_LLVM_COMPILER=1
- ifeq ($(ART_USE_DEXLANG_FRONTEND),true)
- ART_TEST_CFLAGS += -DART_USE_DEXLANG_FRONTEND=1
- endif
endif
-
-ifeq ($(ART_USE_GREENLAND_COMPILER),true)
- ART_TEST_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
-endif
-
-ifeq ($(ART_USE_QUICK_COMPILER),true)
- ART_TEST_CFLAGS += -DART_USE_QUICK_COMPILER=1
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ ART_TEST_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
# $(1): target or host
@@ -77,7 +69,7 @@
LOCAL_CFLAGS := $(ART_TEST_CFLAGS)
ifeq ($$(art_target_or_host),target)
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS) $(ART_TARGET_DEBUG_CFLAGS)
- LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libstlport libz
+ LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libstlport libz libcutils
LOCAL_STATIC_LIBRARIES += libgtest
LOCAL_MODULE_PATH := $(ART_NATIVETEST_OUT)
include $(BUILD_EXECUTABLE)
@@ -86,6 +78,7 @@
else # host
LOCAL_CFLAGS += $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libz-host
+ LOCAL_STATIC_LIBRARIES += libcutils
ifeq ($(HOST_OS),darwin)
# Mac OS complains about unresolved symbols if you don't include this.
LOCAL_WHOLE_STATIC_LIBRARIES := libgtest_host
diff --git a/build/Android.libart-compiler-greenland.mk b/build/Android.libart-compiler-greenland.mk
deleted file mode 100644
index 9baef69..0000000
--- a/build/Android.libart-compiler-greenland.mk
+++ /dev/null
@@ -1,206 +0,0 @@
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-LIBART_COMPILER_GREENLAND_CFLAGS := -DART_USE_GREENLAND_COMPILER=1
-
-LIBART_COMPILER_GREENLAND_SRC_FILES += \
- src/greenland/inferred_reg_category_map.cc \
- src/greenland/dalvik_reg.cc \
- src/greenland/dex_lang.cc \
- src/greenland/gbc_context.cc \
- src/greenland/greenland.cc \
- src/greenland/intrinsic_helper.cc \
- src/greenland/register_allocator.cc \
- src/greenland/target_codegen_machine.cc \
- src/greenland/target_registry.cc \
- src/oat/jni/calling_convention.cc \
- src/oat/jni/jni_compiler.cc \
- src/oat/jni/arm/calling_convention_arm.cc \
- src/oat/jni/mips/calling_convention_mips.cc \
- src/oat/jni/x86/calling_convention_x86.cc
-
-LIBART_COMPILER_GREENLAND_arm_SRC_FILES += \
- src/greenland/arm/arm_codegen_machine.cc \
- src/greenland/arm/arm_invoke_stub_compiler.cc
-
-LIBART_COMPILER_GREENLAND_mips_SRC_FILES += \
- src/greenland/mips/mips_codegen_machine.cc \
- src/greenland/mips/mips_invoke_stub_compiler.cc
-
-LIBART_COMPILER_GREENLAND_x86_SRC_FILES += \
- src/greenland/x86/x86_codegen_machine.cc \
- src/greenland/x86/x86_lir_emitter.cc \
- src/greenland/x86/x86_lir_info.cc \
- src/greenland/x86/x86_invoke_stub_compiler.cc
-
-########################################################################
-
-include $(CLEAR_VARS)
-LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
-LOCAL_MODULE := target_lir_builder_generator
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_SRC_FILES := src/greenland/tools/target_lir_builder_generator.cc
-LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
-LOCAL_C_INCLUDES := $(ART_C_INCLUDES)
-include $(BUILD_HOST_EXECUTABLE)
-TARGET_LIR_BUILDER_GENERATOR := $(LOCAL_BUILT_MODULE)
-
-########################################################################
-
-# $(1): target or host
-# $(2): ndebug or debug
-define build-libart-compiler-greenland
- ifneq ($(1),target)
- ifneq ($(1),host)
- $$(error expected target or host for argument 1, received $(1))
- endif
- endif
- ifneq ($(2),ndebug)
- ifneq ($(2),debug)
- $$(error expected ndebug or debug for argument 2, received $(2))
- endif
- endif
-
- art_target_or_host := $(1)
- art_ndebug_or_debug := $(2)
-
- include $(CLEAR_VARS)
- ifeq ($$(art_target_or_host),target)
- include external/stlport/libstlport.mk
- endif
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := libart-compiler-greenland
- else # debug
- LOCAL_MODULE := libartd-compiler-greenland
- endif
-
- LOCAL_MODULE_TAGS := optional
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
-
- LOCAL_SRC_FILES := $(LIBART_COMPILER_GREENLAND_SRC_FILES)
- LOCAL_CFLAGS := $(LIBART_COMPILER_GREENLAND_CFLAGS)
- ifeq ($$(art_target_or_host),target)
- LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
- else # host
- LOCAL_IS_HOST_MODULE := true
- LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
- endif
-
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
-
- ifeq ($$(art_target_or_host),target)
- ENUM_INCLUDE_LIR_TARGETS := arm
- LOCAL_SRC_FILES += \
- $(LIBART_COMPILER_GREENLAND_$(TARGET_ARCH)_SRC_FILES)
- else
- ENUM_INCLUDE_LIR_TARGETS := arm mips x86
- LOCAL_SRC_FILES += \
- $(LIBART_COMPILER_GREENLAND_arm_SRC_FILES) \
- $(LIBART_COMPILER_GREENLAND_mips_SRC_FILES) \
- $(LIBART_COMPILER_GREENLAND_x86_SRC_FILES)
- endif
-
- GENERATED_SRC_DIR := $$(call intermediates-dir-for,$$(LOCAL_MODULE_CLASS),$$(LOCAL_MODULE),$$(LOCAL_IS_HOST_MODULE),)
- ENUM_TARGEET_LIR_BUILDER_INC_FILES := $$(foreach lir_target, $$(ENUM_INCLUDE_LIR_TARGETS), $$(lir_target)_lir_builder_base.inc)
- ENUM_TARGET_LIR_BUILDER_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/, $$(ENUM_TARGEET_LIR_BUILDER_INC_FILES))
-
-$$(ENUM_TARGET_LIR_BUILDER_OUT_GEN): PRIVATE_LIR_TARGET = $$(subst _lir_builder_base.inc,,$$(notdir $$@))
-$$(ENUM_TARGET_LIR_BUILDER_OUT_GEN): %.inc : $$(TARGET_LIR_BUILDER_GENERATOR)
- @echo "target Generated: $$@"
- $$(hide) $$(TARGET_LIR_BUILDER_GENERATOR) $$(PRIVATE_LIR_TARGET) > $$@
-
-LOCAL_GENERATED_SOURCES += $$(ENUM_TARGET_LIR_BUILDER_OUT_GEN)
-
- LOCAL_STATIC_LIBRARIES += \
- libLLVMBitWriter \
- libLLVMBitReader \
- libLLVMScalarOpts \
- libLLVMInstCombine \
- libLLVMTransformUtils \
- libLLVMAnalysis \
- libLLVMTarget \
- libLLVMCore \
- libLLVMSupport
- LOCAL_SHARED_LIBRARIES := liblog libnativehelper
- ifeq ($$(art_target_or_host),target)
- LOCAL_SHARED_LIBRARIES += libcutils libstlport libz libdl
- LOCAL_SHARED_LIBRARIES += libdynamic_annotations # tsan support
- LOCAL_SHARED_LIBRARIES += libcorkscrew # native stack trace support
- else # host
- LOCAL_STATIC_LIBRARIES += libcutils
- LOCAL_SHARED_LIBRARIES += libz-host
- LOCAL_SHARED_LIBRARIES += libdynamic_annotations-host # tsan support
- LOCAL_LDLIBS := -ldl -lpthread
- ifeq ($(HOST_OS),linux)
- LOCAL_LDLIBS += -lrt
- endif
- endif
- ifeq ($$(art_ndebug_or_debug),debug)
- ifeq ($$(art_target_or_host),target)
- LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
- else # host
- LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
- endif
- LOCAL_SHARED_LIBRARIES += libartd
- else
- ifeq ($$(art_target_or_host),target)
- LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
- else # host
- LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
- endif
- LOCAL_SHARED_LIBRARIES += libart
- endif
- ifeq ($$(art_target_or_host),target)
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
- include $(BUILD_SHARED_LIBRARY)
- else # host
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
- include $(BUILD_HOST_SHARED_LIBRARY)
- endif
-
- ifeq ($$(art_target_or_host),target)
- ifeq ($$(art_ndebug_or_debug),debug)
- $(TARGET_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
- else
- $(TARGET_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
- endif
- else # host
- ifeq ($$(art_ndebug_or_debug),debug)
- $(HOST_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
- else
- $(HOST_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
- endif
- endif
-endef
-
-
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
- $(eval $(call build-libart-compiler-greenland,target,ndebug))
-endif
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-libart-compiler-greenland,target,debug))
-endif
-ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-libart-compiler-greenland,host,ndebug))
-endif
-ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-libart-compiler-greenland,host,debug))
-endif
diff --git a/build/Android.libart-compiler-llvm.mk b/build/Android.libart-compiler-llvm.mk
index 332b809..48731f4 100644
--- a/build/Android.libart-compiler-llvm.mk
+++ b/build/Android.libart-compiler-llvm.mk
@@ -16,11 +16,8 @@
LIBART_COMPILER_LLVM_CFLAGS := -DART_USE_LLVM_COMPILER
-ifeq ($(ART_USE_DEXLANG_FRONTEND),true)
- LIBART_COMPILER_LLVM_CFLAGS += -DART_USE_DEXLANG_FRONTEND
-endif
-ifeq ($(ART_USE_QUICK_COMPILER),true)
- LIBART_COMPILER_LLVM_CFLAGS += -DART_USE_QUICK_COMPILER
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ ART_TEST_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_LLVM_SRC_FILES += \
@@ -38,37 +35,28 @@
src/compiler_llvm/stub_compiler.cc \
src/greenland/inferred_reg_category_map.cc
-ifeq ($(ART_USE_DEXLANG_FRONTEND),true)
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
LIBART_COMPILER_LLVM_SRC_FILES += \
+ src/compiler/Dataflow.cc \
+ src/compiler/Frontend.cc \
+ src/compiler/IntermediateRep.cc \
+ src/compiler/Ralloc.cc \
+ src/compiler/SSATransformation.cc \
+ src/compiler/Utility.cc \
+ src/compiler/codegen/RallocUtil.cc \
+ src/compiler/codegen/arm/ArchUtility.cc \
+ src/compiler/codegen/arm/ArmRallocUtil.cc \
+ src/compiler/codegen/arm/Assemble.cc \
+ src/compiler/codegen/arm/armv7-a/Codegen.cc \
+ src/compiler_llvm/dalvik_reg.cc \
src/compiler_llvm/gbc_expander.cc \
- src/greenland/dalvik_reg.cc \
- src/greenland/dex_lang.cc \
+ src/compiler_llvm/method_compiler.cc \
src/greenland/intrinsic_helper.cc \
src/greenland/ir_builder.cc
else
- ifeq ($(ART_USE_QUICK_COMPILER),true)
- LIBART_COMPILER_LLVM_SRC_FILES += \
- src/compiler/Dataflow.cc \
- src/compiler/Frontend.cc \
- src/compiler/IntermediateRep.cc \
- src/compiler/Ralloc.cc \
- src/compiler/SSATransformation.cc \
- src/compiler/Utility.cc \
- src/compiler/codegen/RallocUtil.cc \
- src/compiler/codegen/arm/ArchUtility.cc \
- src/compiler/codegen/arm/ArmRallocUtil.cc \
- src/compiler/codegen/arm/Assemble.cc \
- src/compiler/codegen/arm/armv7-a/Codegen.cc \
- src/compiler_llvm/dalvik_reg.cc \
- src/compiler_llvm/gbc_expander.cc \
- src/compiler_llvm/method_compiler.cc \
- src/greenland/intrinsic_helper.cc \
- src/greenland/ir_builder.cc
- else
- LIBART_COMPILER_LLVM_SRC_FILES += \
- src/compiler_llvm/dalvik_reg.cc \
- src/compiler_llvm/method_compiler.cc
- endif
+ LIBART_COMPILER_LLVM_SRC_FILES += \
+ src/compiler_llvm/dalvik_reg.cc \
+ src/compiler_llvm/method_compiler.cc
endif
# $(1): target or host
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index 994bcca..323017a 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -26,13 +26,9 @@
src/oat/jni/jni_compiler.cc \
src/oat/jni/arm/calling_convention_arm.cc \
src/oat/jni/mips/calling_convention_mips.cc \
- src/oat/jni/x86/calling_convention_x86.cc
-
-ifeq ($(ART_USE_QUICK_COMPILER), true)
-LIBART_COMPILER_COMMON_SRC_FILES += \
+ src/oat/jni/x86/calling_convention_x86.cc \
src/greenland/ir_builder.cc \
src/greenland/intrinsic_helper.cc
-endif
LIBART_COMPILER_arm_SRC_FILES += \
$(LIBART_COMPILER_COMMON_SRC_FILES) \
@@ -118,17 +114,15 @@
endif
LOCAL_SHARED_LIBRARIES += libart
endif
- ifeq ($(ART_USE_QUICK_COMPILER), true)
- LOCAL_SHARED_LIBRARIES += libbcc
- endif
+ LOCAL_SHARED_LIBRARIES += libbcc
# TODO: temporary hack for testing.
ifeq ($$(libart_compiler_arch),mips)
LOCAL_CFLAGS += -D__mips_hard_float
endif
- ifeq ($(ART_USE_QUICK_COMPILER), true)
- LOCAL_CFLAGS += -DART_USE_QUICK_COMPILER
+ ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ ART_TEST_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
@@ -139,19 +133,15 @@
LOCAL_LDLIBS := -ldl -lpthread
endif
ifeq ($$(art_target_or_host),target)
- ifeq ($(ART_USE_QUICK_COMPILER), true)
- LOCAL_SHARED_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
- endif
+ LOCAL_SHARED_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_IS_HOST_MODULE := true
- ifeq ($(ART_USE_QUICK_COMPILER), true)
- LOCAL_STATIC_LIBRARIES += libcutils
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
- endif
+ LOCAL_STATIC_LIBRARIES += libcutils
+ include $(LLVM_GEN_INTRINSICS_MK)
+ include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
diff --git a/build/Android.libart.mk b/build/Android.libart.mk
index 43ce31a..435056a 100644
--- a/build/Android.libart.mk
+++ b/build/Android.libart.mk
@@ -17,17 +17,9 @@
LIBART_CFLAGS :=
ifeq ($(ART_USE_LLVM_COMPILER),true)
LIBART_CFLAGS += -DART_USE_LLVM_COMPILER=1
- ifeq ($(ART_USE_DEXLANG_FRONTEND),true)
- LIBART_CFLAGS += -DART_USE_DEXLANG_FRONTEND=1
- endif
endif
-
-ifeq ($(ART_USE_GREENLAND_COMPILER),true)
- LIBART_CFLAGS += -DART_USE_GREENLAND_COMPILER=1
-endif
-
-ifeq ($(ART_USE_QUICK_COMPILER),true)
- LIBART_CFLAGS += -DART_USE_QUICK_COMPILER=1
+ifeq ($(ART_USE_PORTABLE_COMPILER),true)
+ ART_TEST_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
# $(1): target or host
@@ -115,17 +107,12 @@
LOCAL_LDLIBS += -lrt
endif
endif
+ include $(LLVM_GEN_INTRINSICS_MK)
ifeq ($$(art_target_or_host),target)
- ifeq ($(ART_REQUIRE_LLVM),true)
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_DEVICE_BUILD_MK)
- endif
+ include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_SHARED_LIBRARY)
else # host
- ifeq ($(ART_REQUIRE_LLVM),true)
- include $(LLVM_GEN_INTRINSICS_MK)
- include $(LLVM_HOST_BUILD_MK)
- endif
+ include $(LLVM_HOST_BUILD_MK)
include $(BUILD_HOST_SHARED_LIBRARY)
endif
endef
diff --git a/src/atomic_integer.h b/src/atomic_integer.h
index 54d5fd8..adf3e77 100644
--- a/src/atomic_integer.h
+++ b/src/atomic_integer.h
@@ -17,7 +17,8 @@
#ifndef ART_SRC_ATOMIC_INTEGER_H_
#define ART_SRC_ATOMIC_INTEGER_H_
-#include "atomic.h"
+#include "cutils/atomic.h"
+#include "cutils/atomic-inline.h"
namespace art {
@@ -62,6 +63,14 @@
int32_t operator -- (int32_t) {
return android_atomic_dec(&value_);
}
+
+ int32_t operator ++ () {
+ return android_atomic_inc(&value_) + 1;
+ }
+
+ int32_t operator -- () {
+ return android_atomic_dec(&value_) - 1;
+ }
private:
int32_t value_;
};
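The header now pulls the atomic primitives from libcutils and gains prefix increment/decrement operators alongside the existing postfix ones. Because android_atomic_inc()/android_atomic_dec() report the pre-update value, the prefix forms add or subtract one to return the post-update value. A small illustrative check of those semantics follows (a hypothetical helper, not part of this change, assuming ART's CHECK_EQ macros from logging.h):

// Sketch only: exercises the operators defined above. The cutils
// increment/decrement primitives return the value held *before* the update,
// which is why the new prefix forms adjust by one before returning.
#include "atomic_integer.h"
#include "logging.h"  // CHECK_EQ

namespace art {

void AtomicIntegerOperatorSketch() {
  AtomicInteger value = 0;
  CHECK_EQ(value++, 0);  // postfix: reports the old value; value is now 1
  CHECK_EQ(++value, 2);  // prefix: reports the new value; value is now 2
  CHECK_EQ(--value, 1);  // prefix: reports the new value; value is now 1
  CHECK_EQ(value--, 1);  // postfix: reports the old value; value is now 0
}

}  // namespace art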
diff --git a/src/gc/barrier.cc b/src/barrier.cc
similarity index 96%
rename from src/gc/barrier.cc
rename to src/barrier.cc
index aa9433b..9651828 100644
--- a/src/gc/barrier.cc
+++ b/src/barrier.cc
@@ -1,5 +1,5 @@
#include "barrier.h"
-#include "../mutex.h"
+#include "../src/mutex.h"
#include "thread.h"
namespace art {
diff --git a/src/gc/barrier.h b/src/barrier.h
similarity index 94%
rename from src/gc/barrier.h
rename to src/barrier.h
index 207536a..342890b 100644
--- a/src/gc/barrier.h
+++ b/src/barrier.h
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_SRC_GC_BARRIER_H_
-#define ART_SRC_GC_BARRIER_H_
+#ifndef ART_SRC_BARRIER_H_
+#define ART_SRC_BARRIER_H_
-#include "../mutex.h"
+#include "../src/mutex.h"
#include "locks.h"
#include "UniquePtr.h"
diff --git a/src/barrier_test.cc b/src/barrier_test.cc
new file mode 100644
index 0000000..43b279e
--- /dev/null
+++ b/src/barrier_test.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "barrier.h"
+
+#include <string>
+
+#include "atomic_integer.h"
+#include "common_test.h"
+#include "thread_pool.h"
+#include "UniquePtr.h"
+
+namespace art {
+class CheckWaitClosure : public Closure {
+ public:
+ CheckWaitClosure(Barrier* barrier, AtomicInteger* count1, AtomicInteger* count2,
+ AtomicInteger* count3)
+ : barrier_(barrier),
+ count1_(count1),
+ count2_(count2),
+ count3_(count3) {
+
+ }
+
+ void Run(Thread* self) {
+ LOG(INFO) << "Before barrier 1 " << self;
+ ++*count1_;
+ barrier_->Wait(self);
+ ++*count2_;
+ LOG(INFO) << "Before barrier 2 " << self;
+ barrier_->Wait(self);
+ ++*count3_;
+ LOG(INFO) << "After barrier 2 " << self;
+ delete this;
+ }
+ private:
+ Barrier* const barrier_;
+ AtomicInteger* const count1_;
+ AtomicInteger* const count2_;
+ AtomicInteger* const count3_;
+};
+
+class BarrierTest : public CommonTest {
+ public:
+ static int32_t num_threads;
+};
+
+int32_t BarrierTest::num_threads = 4;
+
+// Check that barrier wait and barrier increment work.
+TEST_F(BarrierTest, CheckWait) {
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool(num_threads);
+ Barrier barrier;
+ AtomicInteger count1 = 0;
+ AtomicInteger count2 = 0;
+ AtomicInteger count3 = 0;
+ for (int32_t i = 0; i < num_threads; ++i) {
+ thread_pool.AddTask(self, new CheckWaitClosure(&barrier, &count1, &count2, &count3));
+ }
+ thread_pool.StartWorkers(self);
+ barrier.Increment(self, num_threads);
+ // At this point each thread should have passed through the barrier. The first count should be
+ // equal to num_threads.
+ EXPECT_EQ(num_threads, count1);
+ // Count 3 should still be zero since no thread should have gone past the second barrier.
+ EXPECT_EQ(0, count3);
+ // Now let's tell the threads to pass again.
+ barrier.Increment(self, num_threads);
+ // Count 2 should be equal to num_threads since each thread must have passed the second barrier
+ // at this point.
+ EXPECT_EQ(num_threads, count2);
+ // Wait for all the threads to finish.
+ thread_pool.Wait(self);
+ // All three counts should be equal to num_threads now.
+ EXPECT_EQ(count1, count2);
+ EXPECT_EQ(count2, count3);
+ EXPECT_EQ(num_threads, count3);
+}
+
+class CheckPassClosure : public Closure {
+ public:
+ CheckPassClosure(Barrier* barrier, AtomicInteger* count, size_t subtasks)
+ : barrier_(barrier),
+ count_(count),
+ subtasks_(subtasks) {
+
+ }
+
+ void Run(Thread* self) {
+ for (size_t i = 0; i < subtasks_; ++i) {
+ ++*count_;
+ // Pass through to next subtask.
+ barrier_->Pass(self);
+ }
+ delete this;
+ }
+ private:
+ Barrier* const barrier_;
+ AtomicInteger* const count_;
+ const size_t subtasks_;
+};
+
+// Check that barrier pass through works.
+TEST_F(BarrierTest, CheckPass) {
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool(num_threads);
+ Barrier barrier;
+ AtomicInteger count = 0;
+ const int32_t num_tasks = num_threads * 4;
+ const int32_t num_sub_tasks = 128;
+ for (int32_t i = 0; i < num_tasks; ++i) {
+ thread_pool.AddTask(self, new CheckPassClosure(&barrier, &count, num_sub_tasks));
+ }
+ thread_pool.StartWorkers(self);
+ const int32_t expected_total_tasks = num_sub_tasks * num_tasks;
+ // Wait for all the tasks to complete using the barrier.
+ barrier.Increment(self, expected_total_tasks);
+ // The total number of completed tasks should be equal to expected_total_tasks.
+ EXPECT_EQ(count, expected_total_tasks);
+}
+
+} // namespace art
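The test drives three Barrier operations: Wait() decrements an internal count and blocks, Pass() decrements without blocking, and Increment(self, delta) adds delta and blocks until the count next reaches zero, at which point every blocked caller is released (so barrier.Increment(self, num_threads) above returns only after all workers have reached their Wait()). Below is a rough standalone model of those semantics using standard C++ primitives; it is an illustration only, not the implementation in src/barrier.cc, which uses ART's own Mutex/ConditionVariable and Thread* plumbing.

// Standalone model of the counting-barrier semantics exercised above.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class BarrierModel {
 public:
  BarrierModel() : count_(0), generation_(0) {}

  // Decrement the count and block until it next reaches zero.
  void Wait() { Increment(-1); }

  // Decrement the count without blocking (as in the CheckPass test).
  void Pass() {
    std::lock_guard<std::mutex> lock(mutex_);
    SetCountLocked(count_ - 1);
  }

  // Add delta (e.g. the number of expected Wait/Pass calls) and block until
  // the count next reaches zero.
  void Increment(int delta) {
    std::unique_lock<std::mutex> lock(mutex_);
    SetCountLocked(count_ + delta);
    if (count_ != 0) {
      const uint64_t generation = generation_;
      cv_.wait(lock, [&] { return generation_ != generation; });
    }
  }

 private:
  // Must be called with mutex_ held.
  void SetCountLocked(int count) {
    count_ = count;
    if (count_ == 0) {
      ++generation_;     // Marks "the count reached zero" for current waiters.
      cv_.notify_all();  // Release everyone blocked in Wait()/Increment().
    }
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  int count_;
  uint64_t generation_;
};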
diff --git a/src/class_linker.cc b/src/class_linker.cc
index a76c9c8..75f0f38 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -1584,7 +1584,8 @@
klass->SetClass(GetClassRoot(kJavaLangClass));
uint32_t access_flags = dex_class_def.access_flags_;
// Make sure that none of our runtime-only flags are set.
- CHECK_EQ(access_flags & ~kAccJavaFlagsMask, 0U);
+ // TODO: JACK CLASS ACCESS (HACK TO BE REMOVED)
+ CHECK_EQ(access_flags & ~(kAccJavaFlagsMask | kAccClassJack), 0U);
klass->SetAccessFlags(access_flags);
klass->SetClassLoader(class_loader);
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
@@ -2354,8 +2355,8 @@
}
{
ObjectLock lock(self, klass.get()); // Must hold lock on object when initializing.
- interfaces_sfield->SetObject(NULL, interfaces);
- throws_sfield->SetObject(NULL, throws);
+ interfaces_sfield->SetObject(klass.get(), interfaces);
+ throws_sfield->SetObject(klass.get(), throws);
klass->SetStatus(Class::kStatusInitialized);
}
diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc
index 1c41f3b..d32e91e 100644
--- a/src/class_linker_test.cc
+++ b/src/class_linker_test.cc
@@ -871,68 +871,68 @@
FieldHelper fh(s0);
EXPECT_STREQ(ClassHelper(s0->GetClass()).GetDescriptor(), "Ljava/lang/reflect/Field;");
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimBoolean);
- EXPECT_EQ(true, s0->GetBoolean(NULL));
- s0->SetBoolean(NULL, false);
+ EXPECT_EQ(true, s0->GetBoolean(statics));
+ s0->SetBoolean(statics, false);
Field* s1 = statics->FindStaticField("s1", "B");
fh.ChangeField(s1);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimByte);
- EXPECT_EQ(5, s1->GetByte(NULL));
- s1->SetByte(NULL, 6);
+ EXPECT_EQ(5, s1->GetByte(statics));
+ s1->SetByte(statics, 6);
Field* s2 = statics->FindStaticField("s2", "C");
fh.ChangeField(s2);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimChar);
- EXPECT_EQ('a', s2->GetChar(NULL));
- s2->SetChar(NULL, 'b');
+ EXPECT_EQ('a', s2->GetChar(statics));
+ s2->SetChar(statics, 'b');
Field* s3 = statics->FindStaticField("s3", "S");
fh.ChangeField(s3);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimShort);
- EXPECT_EQ(-536, s3->GetShort(NULL));
- s3->SetShort(NULL, -535);
+ EXPECT_EQ(-536, s3->GetShort(statics));
+ s3->SetShort(statics, -535);
Field* s4 = statics->FindStaticField("s4", "I");
fh.ChangeField(s4);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimInt);
- EXPECT_EQ(2000000000, s4->GetInt(NULL));
- s4->SetInt(NULL, 2000000001);
+ EXPECT_EQ(2000000000, s4->GetInt(statics));
+ s4->SetInt(statics, 2000000001);
Field* s5 = statics->FindStaticField("s5", "J");
fh.ChangeField(s5);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimLong);
- EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(NULL));
- s5->SetLong(NULL, 0x34567890abcdef12LL);
+ EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics));
+ s5->SetLong(statics, 0x34567890abcdef12LL);
Field* s6 = statics->FindStaticField("s6", "F");
fh.ChangeField(s6);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimFloat);
- EXPECT_EQ(0.5, s6->GetFloat(NULL));
- s6->SetFloat(NULL, 0.75);
+ EXPECT_EQ(0.5, s6->GetFloat(statics));
+ s6->SetFloat(statics, 0.75);
Field* s7 = statics->FindStaticField("s7", "D");
fh.ChangeField(s7);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimDouble);
- EXPECT_EQ(16777217, s7->GetDouble(NULL));
- s7->SetDouble(NULL, 16777219);
+ EXPECT_EQ(16777217, s7->GetDouble(statics));
+ s7->SetDouble(statics, 16777219);
Field* s8 = statics->FindStaticField("s8", "Ljava/lang/String;");
fh.ChangeField(s8);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimNot);
- EXPECT_TRUE(s8->GetObject(NULL)->AsString()->Equals("android"));
- s8->SetObject(NULL, String::AllocFromModifiedUtf8(soa.Self(), "robot"));
+ EXPECT_TRUE(s8->GetObject(statics)->AsString()->Equals("android"));
+ s8->SetObject(s8->GetDeclaringClass(), String::AllocFromModifiedUtf8(soa.Self(), "robot"));
// TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
// http://code.google.com/p/googletest/issues/detail?id=322
- EXPECT_FALSE( s0->GetBoolean(NULL));
- EXPECT_EQ(6, s1->GetByte(NULL));
- EXPECT_EQ('b', s2->GetChar(NULL));
- EXPECT_EQ(-535, s3->GetShort(NULL));
- EXPECT_EQ(2000000001, s4->GetInt(NULL));
- EXPECT_EQ(0x34567890abcdef12LL, s5->GetLong(NULL));
- EXPECT_EQ(0.75, s6->GetFloat(NULL));
- EXPECT_EQ(16777219, s7->GetDouble(NULL));
- EXPECT_TRUE(s8->GetObject(NULL)->AsString()->Equals("robot"));
+ EXPECT_FALSE( s0->GetBoolean(statics));
+ EXPECT_EQ(6, s1->GetByte(statics));
+ EXPECT_EQ('b', s2->GetChar(statics));
+ EXPECT_EQ(-535, s3->GetShort(statics));
+ EXPECT_EQ(2000000001, s4->GetInt(statics));
+ EXPECT_EQ(0x34567890abcdef12LL, s5->GetLong(statics));
+ EXPECT_EQ(0.75, s6->GetFloat(statics));
+ EXPECT_EQ(16777219, s7->GetDouble(statics));
+ EXPECT_TRUE(s8->GetObject(statics)->AsString()->Equals("robot"));
}
TEST_F(ClassLinkerTest, Interfaces) {
diff --git a/src/closure.h b/src/closure.h
new file mode 100644
index 0000000..17f2b84
--- /dev/null
+++ b/src/closure.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_CLOSURE_H_
+#define ART_SRC_CLOSURE_H_
+
+namespace art {
+
+class Thread;
+
+class Closure {
+ public:
+ virtual ~Closure() { }
+ virtual void Run(Thread* self) = 0;
+};
+
+} // namespace art
+
+#endif // ART_SRC_CLOSURE_H_
diff --git a/src/common_test.h b/src/common_test.h
index 560edeb..273d70e 100644
--- a/src/common_test.h
+++ b/src/common_test.h
@@ -306,6 +306,7 @@
#error unsupported OS
#endif
setenv("ANDROID_ROOT", root.c_str(), 1);
+ setenv("LD_LIBRARY_PATH", ":", 0); // Required by java.lang.System.<clinit>.
}
// On target, Cannot use /mnt/sdcard because it is mounted noexec, so use subdir of art-cache
@@ -353,6 +354,16 @@
#elif defined(__i386__)
instruction_set = kX86;
#endif
+
+ // TODO: make selectable
+#if defined(ART_USE_PORTABLE_COMPILER)
+ CompilerBackend compiler_backend = kPortable;
+#elif defined(ART_USE_LLVM_COMPILER)
+ CompilerBackend compiler_backend = kIceland; // TODO: remove
+#else
+ CompilerBackend compiler_backend = kQuick;
+#endif
+
runtime_->SetJniDlsymLookupStub(Compiler::CreateJniDlsymLookupStub(instruction_set));
runtime_->SetAbstractMethodErrorStubArray(Compiler::CreateAbstractMethodErrorStub(instruction_set));
for (int i = 0; i < Runtime::kLastTrampolineMethodType; i++) {
@@ -374,7 +385,7 @@
}
class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
image_classes_.reset(new std::set<std::string>);
- compiler_.reset(new Compiler(instruction_set, true, 2, false, image_classes_.get(),
+ compiler_.reset(new Compiler(compiler_backend, instruction_set, true, 2, false, image_classes_.get(),
true, true));
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
diff --git a/src/compiler.cc b/src/compiler.cc
index 8d7f5b6..4029a01 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -35,6 +35,7 @@
#include "ScopedLocalRef.h"
#include "stl_util.h"
#include "thread.h"
+#include "thread_pool.h"
#include "timing_logger.h"
#include "verifier/method_verifier.h"
@@ -231,7 +232,7 @@
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
-static std::string MakeCompilerSoName(InstructionSet instruction_set) {
+static std::string MakeCompilerSoName(CompilerBackend compiler_backend, InstructionSet instruction_set) {
// TODO: is the ARM/Thumb2 instruction set distinction really buying us anything,
// or just causing hassle like this?
if (instruction_set == kThumb2) {
@@ -249,13 +250,12 @@
const char* suffix = (kIsDebugBuild ? "d" : "");
// Work out the filename for the compiler library.
-#if defined(ART_USE_LLVM_COMPILER)
- std::string library_name(StringPrintf("art%s-compiler-llvm", suffix));
-#elif defined(ART_USE_GREENLAND_COMPILER)
- std::string library_name(StringPrintf("art%s-compiler-greenland", suffix));
-#else
- std::string library_name(StringPrintf("art%s-compiler-%s", suffix, instruction_set_name.c_str()));
-#endif
+ std::string library_name;
+ if ((compiler_backend == kPortable) || (compiler_backend == kIceland)) {
+ library_name = StringPrintf("art%s-compiler-llvm", suffix);
+ } else {
+ library_name = StringPrintf("art%s-compiler-%s", suffix, instruction_set_name.c_str());
+ }
std::string filename(StringPrintf(OS_SHARED_LIB_FORMAT_STR, library_name.c_str()));
#if defined(__APPLE__)
@@ -291,16 +291,15 @@
return fn;
}
-Compiler::Compiler(InstructionSet instruction_set, bool image, size_t thread_count,
- bool support_debugging, const std::set<std::string>* image_classes,
+Compiler::Compiler(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image,
+ size_t thread_count, bool support_debugging, const std::set<std::string>* image_classes,
bool dump_stats, bool dump_timings)
- : instruction_set_(instruction_set),
+ : compiler_backend_(compiler_backend),
+ instruction_set_(instruction_set),
compiled_classes_lock_("compiled classes lock"),
compiled_methods_lock_("compiled method lock"),
compiled_invoke_stubs_lock_("compiled invoke stubs lock"),
-#if defined(ART_USE_LLVM_COMPILER)
compiled_proxy_stubs_lock_("compiled proxy stubs lock"),
-#endif
image_(image),
thread_count_(thread_count),
support_debugging_(support_debugging),
@@ -313,9 +312,10 @@
compiler_(NULL),
compiler_context_(NULL),
jni_compiler_(NULL),
- create_invoke_stub_(NULL)
+ create_invoke_stub_(NULL),
+ thread_pool_(new ThreadPool(thread_count))
{
- std::string compiler_so_name(MakeCompilerSoName(instruction_set_));
+ std::string compiler_so_name(MakeCompilerSoName(compiler_backend_, instruction_set_));
compiler_library_ = dlopen(compiler_so_name.c_str(), RTLD_LAZY);
if (compiler_library_ == NULL) {
LOG(FATAL) << "Couldn't find compiler library " << compiler_so_name << ": " << dlerror();
@@ -324,36 +324,29 @@
CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
- // Initialize compiler_context_
+ // TODO: more work needed to combine initializations and allow per-method backend selection
typedef void (*InitCompilerContextFn)(Compiler&);
-
- InitCompilerContextFn init_compiler_context =
- FindFunction<void (*)(Compiler&)>(compiler_so_name,
- compiler_library_,
- "ArtInitCompilerContext");
+ InitCompilerContextFn init_compiler_context;
+ if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
+ // Initialize compiler_context_
+ init_compiler_context = FindFunction<void (*)(Compiler&)>(compiler_so_name,
+ compiler_library_, "ArtInitCompilerContext");
+ compiler_ = FindFunction<CompilerFn>(compiler_so_name, compiler_library_, "ArtCompileMethod");
+ } else {
+ init_compiler_context = FindFunction<void (*)(Compiler&)>(compiler_so_name,
+ compiler_library_, "ArtInitQuickCompilerContext");
+ compiler_ = FindFunction<CompilerFn>(compiler_so_name, compiler_library_, "ArtQuickCompileMethod");
+ }
init_compiler_context(*this);
-#elif defined(ART_USE_QUICK_COMPILER)
- // Initialize compiler_context_
- typedef void (*InitCompilerContextFn)(Compiler&);
- InitCompilerContextFn init_compiler_context =
- FindFunction<void (*)(Compiler&)>(compiler_so_name,
- compiler_library_,
- "ArtInitQuickCompilerContext");
-
- init_compiler_context(*this);
-#endif
-
- compiler_ = FindFunction<CompilerFn>(compiler_so_name, compiler_library_, "ArtCompileMethod");
jni_compiler_ = FindFunction<JniCompilerFn>(compiler_so_name, compiler_library_, "ArtJniCompileMethod");
create_invoke_stub_ = FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateInvokeStub");
-#if defined(ART_USE_LLVM_COMPILER)
- create_proxy_stub_ = FindFunction<CreateProxyStubFn>(
- compiler_so_name, compiler_library_, "ArtCreateProxyStub");
-#endif
+ if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
+ create_proxy_stub_ = FindFunction<CreateProxyStubFn>(
+ compiler_so_name, compiler_library_, "ArtCreateProxyStub");
+ }
CHECK(!Runtime::Current()->IsStarted());
if (!image_) {
@@ -375,12 +368,10 @@
MutexLock mu(self, compiled_invoke_stubs_lock_);
STLDeleteValues(&compiled_invoke_stubs_);
}
-#if defined(ART_USE_LLVM_COMPILER)
{
MutexLock mu(self, compiled_proxy_stubs_lock_);
STLDeleteValues(&compiled_proxy_stubs_);
}
-#endif
{
MutexLock mu(self, compiled_methods_lock_);
STLDeleteElements(&code_to_patch_);
@@ -390,34 +381,21 @@
STLDeleteElements(&methods_to_patch_);
}
CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
-#if defined(ART_USE_LLVM_COMPILER)
- // Uninitialize compiler_context_
typedef void (*UninitCompilerContextFn)(Compiler&);
-
- std::string compiler_so_name(MakeCompilerSoName(instruction_set_));
-
- UninitCompilerContextFn uninit_compiler_context =
- FindFunction<void (*)(Compiler&)>(compiler_so_name,
- compiler_library_,
- "ArtUnInitCompilerContext");
-
- uninit_compiler_context(*this);
-#elif defined(ART_USE_QUICK_COMPILER)
+ std::string compiler_so_name(MakeCompilerSoName(compiler_backend_, instruction_set_));
+ UninitCompilerContextFn uninit_compiler_context;
// Uninitialize compiler_context_
- typedef void (*UninitCompilerContextFn)(Compiler&);
-
- std::string compiler_so_name(MakeCompilerSoName(instruction_set_));
-
- UninitCompilerContextFn uninit_compiler_context =
- FindFunction<void (*)(Compiler&)>(compiler_so_name,
- compiler_library_,
- "ArtUnInitQuickCompilerContext");
-
+ // TODO: rework to combine initialization/uninitialization
+ if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
+ uninit_compiler_context = FindFunction<void (*)(Compiler&)>(compiler_so_name,
+ compiler_library_, "ArtUnInitCompilerContext");
+ } else {
+ uninit_compiler_context = FindFunction<void (*)(Compiler&)>(compiler_so_name,
+ compiler_library_, "ArtUnInitQuickCompilerContext");
+ }
uninit_compiler_context(*this);
-#endif
if (compiler_library_ != NULL) {
VLOG(compiler) << "dlclose(" << compiler_library_ << ")";
-#if !defined(ART_USE_QUICK_COMPILER)
/*
* FIXME: Temporary workaround
* Apparently, llvm is adding dctors to atexit, but if we unload
@@ -430,7 +408,6 @@
* What's the right thing to do here?
*/
dlclose(compiler_library_);
-#endif
}
}
@@ -839,15 +816,15 @@
// invoked, so this can be passed to the out-of-line runtime support code.
direct_code = 0;
direct_method = 0;
-#if !defined(ART_USE_LLVM_COMPILER)
- if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
- return;
+ if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
+ if (sharp_type != kStatic && sharp_type != kDirect) {
+ return;
+ }
+ } else {
+ if (sharp_type != kStatic && sharp_type != kDirect && sharp_type != kInterface) {
+ return;
+ }
}
-#else
- if (sharp_type != kStatic && sharp_type != kDirect) {
- return;
- }
-#endif
bool method_code_in_boot = method->GetDeclaringClass()->GetClassLoader() == NULL;
if (!method_code_in_boot) {
return;
@@ -980,14 +957,18 @@
class CompilationContext {
public:
+ typedef void Callback(const CompilationContext* context, size_t index);
+
CompilationContext(ClassLinker* class_linker,
jobject class_loader,
Compiler* compiler,
- const DexFile* dex_file)
+ const DexFile* dex_file,
+ ThreadPool* thread_pool)
: class_linker_(class_linker),
class_loader_(class_loader),
compiler_(compiler),
- dex_file_(dex_file) {}
+ dex_file_(dex_file),
+ thread_pool_(thread_pool) {}
ClassLinker* GetClassLinker() const {
CHECK(class_linker_ != NULL);
@@ -1008,96 +989,64 @@
return dex_file_;
}
+ void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) {
+ Thread* self = Thread::Current();
+ self->AssertNoPendingException();
+ CHECK_GT(work_units, 0U);
+
+ std::vector<Closure*> closures(work_units);
+ for (size_t i = 0; i < work_units; ++i) {
+ closures[i] = new ForAllClosure(this, begin + i, end, callback, work_units);
+ thread_pool_->AddTask(self, closures[i]);
+ }
+ thread_pool_->StartWorkers(self);
+
+ // Ensure we're suspended while we're blocked waiting for the other threads to finish (the
+ // thread pool Wait() below blocks until the queued work completes).
+ CHECK_NE(self->GetState(), kRunnable);
+
+ // Wait for all the worker threads to finish.
+ thread_pool_->Wait(self);
+
+ STLDeleteElements(&closures);
+ }
+
private:
+
+ class ForAllClosure : public Closure {
+ public:
+ ForAllClosure(CompilationContext* context, size_t begin, size_t end, Callback* callback,
+ size_t stripe)
+ : context_(context),
+ begin_(begin),
+ end_(end),
+ callback_(callback),
+ stripe_(stripe)
+ {
+
+ }
+
+ virtual void Run(Thread* self) {
+ for (size_t i = begin_; i < end_; i += stripe_) {
+ callback_(context_, i);
+ self->AssertNoPendingException();
+ }
+ }
+ private:
+ CompilationContext* const context_;
+ const size_t begin_;
+ const size_t end_;
+ const Callback* callback_;
+ const size_t stripe_;
+ };
+
ClassLinker* const class_linker_;
const jobject class_loader_;
Compiler* const compiler_;
const DexFile* const dex_file_;
+ ThreadPool* thread_pool_;
};
-typedef void Callback(const CompilationContext* context, size_t index);
-
-static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback,
- size_t thread_count);
-
-class WorkerThread {
- public:
- WorkerThread(CompilationContext* context, size_t begin, size_t end, Callback callback, size_t stripe, bool spawn)
- : spawn_(spawn), context_(context), begin_(begin), end_(end), callback_(callback), stripe_(stripe) {
- if (spawn_) {
- // Mac OS stacks are only 512KiB. Make sure we have the same stack size on all platforms.
- pthread_attr_t attr;
- CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new compiler worker thread");
- CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, 1*MB), "new compiler worker thread");
- CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Go, this), "new compiler worker thread");
- CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new compiler worker thread");
- }
- }
-
- ~WorkerThread() {
- if (spawn_) {
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "compiler worker shutdown");
- }
- }
-
- private:
- static void* Go(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_) {
- WorkerThread* worker = reinterpret_cast<WorkerThread*>(arg);
- Runtime* runtime = Runtime::Current();
- if (worker->spawn_) {
- CHECK(runtime->AttachCurrentThread("Compiler Worker", true, NULL));
- }
- worker->Run();
- if (worker->spawn_) {
- runtime->DetachCurrentThread();
- }
- return NULL;
- }
-
- void Go() LOCKS_EXCLUDED(Locks::mutator_lock_) {
- Go(this);
- }
-
- void Run() LOCKS_EXCLUDED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- for (size_t i = begin_; i < end_; i += stripe_) {
- callback_(context_, i);
- self->AssertNoPendingException();
- }
- }
-
- pthread_t pthread_;
- // Was this thread spawned or is it the main thread?
- const bool spawn_;
-
- const CompilationContext* const context_;
- const size_t begin_;
- const size_t end_;
- Callback* callback_;
- const size_t stripe_;
-
- friend void ForAll(CompilationContext*, size_t, size_t, Callback, size_t);
-};
-
-static void ForAll(CompilationContext* context, size_t begin, size_t end, Callback callback,
- size_t thread_count)
- LOCKS_EXCLUDED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- self->AssertNoPendingException();
- CHECK_GT(thread_count, 0U);
-
- std::vector<WorkerThread*> threads;
- for (size_t i = 0; i < thread_count; ++i) {
- threads.push_back(new WorkerThread(context, begin + i, end, callback, thread_count, (i != 0)));
- }
- threads[0]->Go();
-
- // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker
- // thread destructor's called below perform join).
- CHECK_NE(self->GetState(), kRunnable);
- STLDeleteElements(&threads);
-}
-
// Return true if the class should be skipped during compilation. We
// never skip classes in the boot class loader. However, if we have a
// non-boot class loader and we can resolve the class in the boot
@@ -1216,11 +1165,11 @@
// TODO: we could resolve strings here, although the string table is largely filled with class
// and method names.
- CompilationContext context(class_linker, class_loader, this, &dex_file);
- ForAll(&context, 0, dex_file.NumTypeIds(), ResolveType, thread_count_);
+ CompilationContext context(class_linker, class_loader, this, &dex_file, thread_pool_.get());
+ context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types");
- ForAll(&context, 0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
+ context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields");
}
@@ -1281,8 +1230,8 @@
void Compiler::VerifyDexFile(jobject class_loader, const DexFile& dex_file, TimingLogger& timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CompilationContext context(class_linker, class_loader, this, &dex_file);
- ForAll(&context, 0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
+ CompilationContext context(class_linker, class_loader, this, &dex_file, thread_pool_.get());
+ context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
timings.AddSplit("Verify " + dex_file.GetLocation());
}
@@ -1326,8 +1275,8 @@
void Compiler::InitializeClassesWithoutClinit(jobject jni_class_loader, const DexFile& dex_file,
TimingLogger& timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CompilationContext context(class_linker, jni_class_loader, this, &dex_file);
- ForAll(&context, 0, dex_file.NumClassDefs(), InitializeClassWithoutClinit, thread_count_);
+ CompilationContext context(class_linker, jni_class_loader, this, &dex_file, thread_pool_.get());
+ context.ForAll(0, dex_file.NumClassDefs(), InitializeClassWithoutClinit, thread_count_);
timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation());
}
@@ -1416,8 +1365,8 @@
void Compiler::CompileDexFile(jobject class_loader, const DexFile& dex_file,
TimingLogger& timings) {
- CompilationContext context(NULL, class_loader, this, &dex_file);
- ForAll(&context, 0, dex_file.NumClassDefs(), Compiler::CompileClass, thread_count_);
+ CompilationContext context(NULL, class_loader, this, &dex_file, thread_pool_.get());
+ context.ForAll(0, dex_file.NumClassDefs(), Compiler::CompileClass, thread_count_);
timings.AddSplit("Compile " + dex_file.GetLocation());
}
@@ -1472,8 +1421,7 @@
InsertInvokeStub(key, compiled_invoke_stub);
}
-#if defined(ART_USE_LLVM_COMPILER)
- if (!is_static) {
+ if (((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) && !is_static) {
const CompiledInvokeStub* compiled_proxy_stub = FindProxyStub(shorty);
if (compiled_proxy_stub == NULL) {
compiled_proxy_stub = (*create_proxy_stub_)(*this, shorty, shorty_len);
@@ -1481,7 +1429,6 @@
InsertProxyStub(shorty, compiled_proxy_stub);
}
}
-#endif
if (self->IsExceptionPending()) {
ScopedObjectAccess soa(self);
@@ -1518,7 +1465,6 @@
}
}
-#if defined(ART_USE_LLVM_COMPILER)
const CompiledInvokeStub* Compiler::FindProxyStub(const char* shorty) const {
MutexLock mu(Thread::Current(), compiled_proxy_stubs_lock_);
ProxyStubTable::const_iterator it = compiled_proxy_stubs_.find(shorty);
@@ -1541,7 +1487,6 @@
compiled_proxy_stubs_.Put(shorty, compiled_proxy_stub);
}
}
-#endif
CompiledClass* Compiler::GetCompiledClass(ClassReference ref) const {
MutexLock mu(Thread::Current(), compiled_classes_lock_);
@@ -1563,17 +1508,15 @@
return it->second;
}
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_QUICK_COMPILER)
void Compiler::SetBitcodeFileName(std::string const& filename) {
typedef void (*SetBitcodeFileNameFn)(Compiler&, std::string const&);
SetBitcodeFileNameFn set_bitcode_file_name =
- FindFunction<SetBitcodeFileNameFn>(MakeCompilerSoName(instruction_set_),
+ FindFunction<SetBitcodeFileNameFn>(MakeCompilerSoName(compiler_backend_, instruction_set_),
compiler_library_,
"compilerLLVMSetBitcodeFileName");
set_bitcode_file_name(*this, filename);
}
-#endif
} // namespace art
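With the pthread-based WorkerThread/ForAll machinery removed, parallel compilation now goes through the ThreadPool added earlier in this change (src/thread_pool.cc). The surface that CompilationContext::ForAll and barrier_test.cc rely on looks roughly like the sketch below; it is inferred from the call sites in this diff, so treat the ownership comments as assumptions and see src/thread_pool.h for the real declarations.

// Sketch of the ThreadPool interface used by CompilationContext::ForAll and
// the new tests. Inferred from usage in this change; not the actual header.
#include <cstddef>

namespace art {

class Closure;
class Thread;

class ThreadPool {
 public:
  // Creates 'num_threads' worker threads; they do not pull tasks until
  // StartWorkers() is called, so tasks can be queued up front.
  explicit ThreadPool(size_t num_threads);
  ~ThreadPool();

  // Queues a task. The call sites above manage the Closure's lifetime
  // themselves (ForAll deletes its closures after Wait() returns).
  void AddTask(Thread* self, Closure* task);

  // Allows the workers to start executing queued tasks.
  void StartWorkers(Thread* self);

  // Blocks the caller until the queued tasks have been executed.
  void Wait(Thread* self);
};

}  // namespace art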
diff --git a/src/compiler.h b/src/compiler.h
index 5e9dbd7..ba56513 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -32,6 +32,7 @@
#include "object.h"
#include "runtime.h"
#include "safe_map.h"
+#include "thread_pool.h"
namespace art {
@@ -40,9 +41,15 @@
class OatCompilationUnit;
class TimingLogger;
+enum CompilerBackend {
+ kQuick,
+ kQuickGBC,
+ kPortable,
+ kIceland // Temporary - remove soon
+};
+
// Thread-local storage compiler worker threads
class CompilerTls {
-#if defined(ART_USE_QUICK_COMPILER)
public:
CompilerTls() : llvm_info_(NULL) {}
~CompilerTls() {}
@@ -53,7 +60,6 @@
private:
void* llvm_info_;
-#endif
};
class Compiler {
@@ -63,9 +69,9 @@
// enabled. "image_classes" lets the compiler know what classes it
// can assume will be in the image, with NULL implying all available
// classes.
- explicit Compiler(InstructionSet instruction_set, bool image, size_t thread_count,
- bool support_debugging, const std::set<std::string>* image_classes,
- bool dump_stats, bool dump_timings);
+ explicit Compiler(CompilerBackend compiler_backend, InstructionSet instruction_set, bool image,
+ size_t thread_count, bool support_debugging,
+ const std::set<std::string>* image_classes, bool dump_stats, bool dump_timings);
~Compiler();
@@ -84,6 +90,10 @@
return instruction_set_;
}
+ CompilerBackend GetCompilerBackend() const {
+ return compiler_backend_;
+ }
+
bool IsImage() const {
return image_;
}
@@ -119,9 +129,7 @@
const CompiledInvokeStub* FindInvokeStub(const std::string& key) const
LOCKS_EXCLUDED(compiled_invoke_stubs_lock_);
-#if defined(ART_USE_LLVM_COMPILER)
const CompiledInvokeStub* FindProxyStub(const char* shorty) const;
-#endif
// Callbacks from compiler to see what runtime checks must be generated.
@@ -175,9 +183,7 @@
size_t literal_offset)
LOCKS_EXCLUDED(compiled_methods_lock_);
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_QUICK_COMPILER)
void SetBitcodeFileName(std::string const& filename);
-#endif
void SetCompilerContext(void* compiler_context) {
compiler_context_ = compiler_context;
@@ -298,13 +304,13 @@
void InsertInvokeStub(const std::string& key, const CompiledInvokeStub* compiled_invoke_stub)
LOCKS_EXCLUDED(compiled_invoke_stubs_lock_);
-#if defined(ART_USE_LLVM_COMPILER)
void InsertProxyStub(const char* shorty, const CompiledInvokeStub* compiled_proxy_stub);
-#endif
std::vector<const PatchInformation*> code_to_patch_;
std::vector<const PatchInformation*> methods_to_patch_;
+ CompilerBackend compiler_backend_;
+
InstructionSet instruction_set_;
typedef SafeMap<const ClassReference, CompiledClass*> ClassTable;
@@ -322,12 +328,10 @@
mutable Mutex compiled_invoke_stubs_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
InvokeStubTable compiled_invoke_stubs_ GUARDED_BY(compiled_invoke_stubs_lock_);
-#if defined(ART_USE_LLVM_COMPILER)
typedef SafeMap<std::string, const CompiledInvokeStub*> ProxyStubTable;
// Proxy stubs created for proxy invocation delegation
mutable Mutex compiled_proxy_stubs_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ProxyStubTable compiled_proxy_stubs_ GUARDED_BY(compiled_proxy_stubs_lock_);
-#endif
bool image_;
size_t thread_count_;
@@ -341,10 +345,8 @@
const std::set<std::string>* image_classes_;
-#if defined(ART_USE_LLVM_COMPILER)
typedef void (*CompilerCallbackFn)(Compiler& compiler);
typedef MutexLock* (*CompilerMutexLockFn)(Compiler& compiler);
-#endif
void* compiler_library_;
@@ -365,9 +367,10 @@
const char* shorty, uint32_t shorty_len);
CreateInvokeStubFn create_invoke_stub_;
+ UniquePtr<ThreadPool> thread_pool_;
+
pthread_key_t tls_key_;
-#if defined(ART_USE_LLVM_COMPILER)
typedef CompiledInvokeStub* (*CreateProxyStubFn)
(Compiler& compiler, const char* shorty, uint32_t shorty_len);
CreateProxyStubFn create_proxy_stub_;
@@ -382,7 +385,6 @@
typedef const AbstractMethod::InvokeStub* (*CompilerGetMethodInvokeStubAddrFn)
(const Compiler& compiler, const CompiledInvokeStub* cm, const AbstractMethod* method);
CompilerGetMethodInvokeStubAddrFn compiler_get_method_invoke_stub_addr_;
-#endif
DISALLOW_COPY_AND_ASSIGN(Compiler);
diff --git a/src/compiler/Compiler.h b/src/compiler/Compiler.h
index 7eb32c2..11214cf 100644
--- a/src/compiler/Compiler.h
+++ b/src/compiler/Compiler.h
@@ -20,20 +20,16 @@
#include "dex_file.h"
#include "dex_instruction.h"
-#if defined(ART_USE_QUICK_COMPILER)
namespace llvm {
class Module;
class LLVMContext;
}
-#endif
namespace art {
-#if defined(ART_USE_QUICK_COMPILER)
namespace greenland {
class IntrinsicHelper;
class IRBuilder;
}
-#endif
#define COMPILER_TRACED(X)
#define COMPILER_TRACEE(X)
@@ -141,10 +137,8 @@
kDebugShowNops,
kDebugCountOpcodes,
kDebugDumpCheckStats,
-#if defined(ART_USE_QUICK_COMPILER)
kDebugDumpBitcodeFile,
kDebugVerifyBitcode,
-#endif
};
enum OatMethodAttributes {
@@ -177,7 +171,6 @@
kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order
};
-#if defined(ART_USE_QUICK_COMPILER)
class LLVMInfo {
public:
LLVMInfo();
@@ -205,7 +198,6 @@
UniquePtr<art::greenland::IntrinsicHelper> intrinsic_helper_;
UniquePtr<art::greenland::IRBuilder> ir_builder_;
};
-#endif
struct CompilationUnit;
struct BasicBlock;
diff --git a/src/compiler/CompilerIR.h b/src/compiler/CompilerIR.h
index 5a10831..d08af07 100644
--- a/src/compiler/CompilerIR.h
+++ b/src/compiler/CompilerIR.h
@@ -23,10 +23,8 @@
#include "CompilerUtility.h"
#include "oat_compilation_unit.h"
#include "safe_map.h"
-#if defined(ART_USE_QUICK_COMPILER)
#include "greenland/ir_builder.h"
#include "llvm/Module.h"
-#endif
namespace art {
@@ -262,9 +260,7 @@
bool catchEntry;
bool explicitThrow;
bool conditionalBranch;
-#if defined(ART_USE_QUICK_COMPILER)
bool hasReturn;
-#endif
uint16_t startOffset;
uint16_t nestingDepth;
BBType blockType;
@@ -384,7 +380,6 @@
numArenaBlocks(0),
mstats(NULL),
checkstats(NULL),
-#if defined(ART_USE_QUICK_COMPILER)
genBitcode(false),
context(NULL),
module(NULL),
@@ -397,7 +392,6 @@
tempName(0),
numShadowFrameEntries(0),
shadowMap(NULL),
-#endif
#ifndef NDEBUG
liveSReg(0),
#endif
@@ -547,7 +541,6 @@
int numArenaBlocks;
Memstats* mstats;
Checkstats* checkstats;
-#if defined(ART_USE_QUICK_COMPILER)
bool genBitcode;
LLVMInfo* llvm_info;
llvm::LLVMContext* context;
@@ -567,7 +560,6 @@
int numShadowFrameEntries;
int* shadowMap;
std::set<llvm::BasicBlock*> llvmBlocks;
-#endif
#ifndef NDEBUG
/*
* Sanity checking for the register temp tracking. The same ssa
diff --git a/src/compiler/Dataflow.cc b/src/compiler/Dataflow.cc
index 0058575..c59b637 100644
--- a/src/compiler/Dataflow.cc
+++ b/src/compiler/Dataflow.cc
@@ -1854,12 +1854,10 @@
case Instruction::CMPG_FLOAT:
case Instruction::CMPG_DOUBLE:
case Instruction::CMP_LONG:
-#if defined(ART_USE_QUICK_COMPILER)
if (cUnit->genBitcode) {
// Bitcode doesn't allow this optimization.
break;
}
-#endif
if (mir->next != NULL) {
MIR* mirNext = mir->next;
Instruction::Code brOpcode = mirNext->dalvikInsn.opcode;
diff --git a/src/compiler/Frontend.cc b/src/compiler/Frontend.cc
index c19751d..c5d5c21 100644
--- a/src/compiler/Frontend.cc
+++ b/src/compiler/Frontend.cc
@@ -21,7 +21,6 @@
#include "object.h"
#include "runtime.h"
-#if defined(ART_USE_QUICK_COMPILER)
#include <llvm/Support/Threading.h>
namespace {
@@ -32,11 +31,9 @@
llvm::llvm_start_multithreaded();
}
}
-#endif
namespace art {
-#if defined(ART_USE_QUICK_COMPILER)
LLVMInfo::LLVMInfo() {
#if !defined(ART_USE_LLVM_COMPILER)
pthread_once(&llvm_multi_init, InitializeLLVMForQuick);
@@ -62,7 +59,6 @@
delete reinterpret_cast<LLVMInfo*>(compiler.GetCompilerContext());
compiler.SetCompilerContext(NULL);
}
-#endif
/* Default optimizer/debug setting for the compiler. */
static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
@@ -94,10 +90,8 @@
//(1 << kDebugShowNops) |
//(1 << kDebugCountOpcodes) |
//(1 << kDebugDumpCheckStats) |
-#if defined(ART_USE_QUICK_COMPILER)
//(1 << kDebugDumpBitcodeFile) |
//(1 << kDebugVerifyBitcode) |
-#endif
0;
inline bool contentIsInsn(const u2* codePtr) {
@@ -788,14 +782,12 @@
}
CompiledMethod* compileMethod(Compiler& compiler,
+ const CompilerBackend compilerBackend,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file
-#if defined(ART_USE_QUICK_COMPILER)
- , LLVMInfo* llvm_info,
- bool gbcOnly
-#endif
+ const DexFile& dex_file,
+ LLVMInfo* llvm_info
)
{
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
@@ -824,16 +816,18 @@
cUnit->numIns = code_item->ins_size_;
cUnit->numRegs = code_item->registers_size_ - cUnit->numIns;
cUnit->numOuts = code_item->outs_size_;
-#if defined(ART_USE_QUICK_COMPILER)
DCHECK((cUnit->instructionSet == kThumb2) ||
(cUnit->instructionSet == kX86) ||
(cUnit->instructionSet == kMips));
- cUnit->llvm_info = llvm_info;
- if (cUnit->instructionSet == kThumb2) {
- // TODO: remove this once x86 is tested
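+ // Bitcode (GBC) generation is now selected by the compiler backend rather than a compile-time define.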
+ if ((compilerBackend == kQuickGBC) || (compilerBackend == kPortable)) {
cUnit->genBitcode = true;
}
-#endif
+ DCHECK_NE(compilerBackend, kIceland); // TODO: remove when Portable/Iceland merge complete
+ // TODO: remove this once x86 is tested
+ if (cUnit->genBitcode && (cUnit->instructionSet != kThumb2)) {
+ UNIMPLEMENTED(WARNING) << "GBC generation untested for non-Thumb targets";
+ }
+ cUnit->llvm_info = llvm_info;
/* Adjust this value accordingly once inlining is performed */
cUnit->numDalvikRegisters = code_item->registers_size_;
// TODO: set this from command line
@@ -848,13 +842,9 @@
cUnit->printMe = VLOG_IS_ON(compiler) ||
(cUnit->enableDebug & (1 << kDebugVerbose));
}
-#if defined(ART_USE_QUICK_COMPILER)
- if (cUnit->genBitcode) {
#ifndef NDEBUG
+ if (cUnit->genBitcode) {
cUnit->enableDebug |= (1 << kDebugVerifyBitcode);
-#endif
- //cUnit->printMe = true;
- //cUnit->enableDebug |= (1 << kDebugDumpBitcodeFile);
}
#endif
@@ -1098,21 +1088,14 @@
}
if (cUnit->qdMode) {
-#if !defined(ART_USE_QUICK_COMPILER)
// Bitcode generation requires full dataflow analysis
- cUnit->disableDataflow = true;
-#endif
+ cUnit->disableDataflow = !cUnit->genBitcode;
// Disable optimization which require dataflow/ssa
- cUnit->disableOpt |=
-#if !defined(ART_USE_QUICK_COMPILER)
- (1 << kNullCheckElimination) |
-#endif
- (1 << kBBOpt) |
- (1 << kPromoteRegs);
+ cUnit->disableOpt |= (1 << kBBOpt) | (1 << kPromoteRegs) | (1 << kNullCheckElimination);
if (cUnit->printMe) {
LOG(INFO) << "QD mode enabled: "
<< PrettyMethod(method_idx, dex_file)
- << " too big: " << cUnit->numBlocks;
+ << " num blocks: " << cUnit->numBlocks;
}
}
@@ -1168,12 +1151,11 @@
/* Allocate Registers using simple local allocation scheme */
oatSimpleRegAlloc(cUnit.get());
-#if defined(ART_USE_QUICK_COMPILER)
/* Go the LLVM path? */
if (cUnit->genBitcode) {
// MIR->Bitcode
oatMethodMIR2Bitcode(cUnit.get());
- if (gbcOnly) {
+ if (compilerBackend == kPortable) {
// all done
oatArenaReset(cUnit.get());
return NULL;
@@ -1181,7 +1163,6 @@
// Bitcode->LIR
oatMethodBitcode2LIR(cUnit.get());
} else {
-#endif
if (specialCase != kNoHandler) {
/*
* Custom codegen for special cases. If for any reason the
@@ -1195,9 +1176,7 @@
if (cUnit->firstLIRInsn == NULL) {
oatMethodMIR2LIR(cUnit.get());
}
-#if defined(ART_USE_QUICK_COMPILER)
}
-#endif
// Debugging only
if (cUnit->enableDebug & (1 << kDebugDumpCFG)) {
@@ -1268,55 +1247,30 @@
return result;
}
-#if defined(ART_USE_QUICK_COMPILER)
CompiledMethod* oatCompileMethod(Compiler& compiler,
+ const CompilerBackend backend,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file)
+ const DexFile& dex_file,
+ LLVMInfo* llvmInfo)
{
- return compileMethod(compiler, code_item, access_flags, invoke_type, method_idx, class_loader,
- dex_file, NULL, false);
+ return compileMethod(compiler, backend, code_item, access_flags, invoke_type, method_idx, class_loader,
+ dex_file, llvmInfo);
}
-/*
- * Given existing llvm module, context, intrinsic_helper and IRBuilder,
- * add the bitcode for the method described by code_item to the module.
- */
-void oatCompileMethodToGBC(Compiler& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file,
- LLVMInfo* llvm_info)
-{
- compileMethod(compiler, code_item, access_flags, invoke_type, method_idx, class_loader,
- dex_file, llvm_info, true);
-}
-#else
-CompiledMethod* oatCompileMethod(Compiler& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file)
-{
- return compileMethod(compiler, code_item, access_flags, invoke_type, method_idx, class_loader,
- dex_file);
-}
-#endif
-
} // namespace art
-#if !defined(ART_USE_LLVM_COMPILER)
extern "C" art::CompiledMethod*
- ArtCompileMethod(art::Compiler& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file)
+ ArtQuickCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file)
{
CHECK_EQ(compiler.GetInstructionSet(), art::oatInstructionSet());
- return art::oatCompileMethod(compiler, code_item, access_flags, invoke_type,
- method_idx, class_loader, dex_file);
+ // TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default
+ art::CompilerBackend backend = compiler.GetCompilerBackend();
+ return art::oatCompileMethod(compiler, backend, code_item, access_flags, invoke_type,
+ method_idx, class_loader, dex_file, NULL /* use thread llvmInfo */);
}
-#endif
diff --git a/src/compiler/Ralloc.cc b/src/compiler/Ralloc.cc
index bf69ce4..ca25b38 100644
--- a/src/compiler/Ralloc.cc
+++ b/src/compiler/Ralloc.cc
@@ -485,19 +485,12 @@
}
}
-#if defined(ART_USE_QUICK_COMPILER)
if (!cUnit->genBitcode) {
/* Remap names */
oatDataFlowAnalysisDispatcher(cUnit, remapNames,
kPreOrderDFSTraversal,
false /* isIterative */);
}
-#else
- /* Remap names */
- oatDataFlowAnalysisDispatcher(cUnit, remapNames,
- kPreOrderDFSTraversal,
- false /* isIterative */);
-#endif
/* Do type & size inference pass */
oatDataFlowAnalysisDispatcher(cUnit, inferTypeAndSize,
diff --git a/src/compiler/codegen/MethodBitcode.cc b/src/compiler/codegen/MethodBitcode.cc
index cf07ea4..7920883 100644
--- a/src/compiler/codegen/MethodBitcode.cc
+++ b/src/compiler/codegen/MethodBitcode.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#if defined(ART_USE_QUICK_COMPILER)
#include "object_utils.h"
#include <llvm/Support/ToolOutputFile.h>
@@ -3537,5 +3536,3 @@
} // namespace art
-
-#endif // ART_USE_QUICK_COMPILER
diff --git a/src/compiler_llvm/compilation_unit.cc b/src/compiler_llvm/compilation_unit.cc
index ba71aee..a27ea6e 100644
--- a/src/compiler_llvm/compilation_unit.cc
+++ b/src/compiler_llvm/compilation_unit.cc
@@ -154,7 +154,7 @@
llvm::FunctionPass*
CreateGBCExpanderPass(const greenland::IntrinsicHelper& intrinsic_helper,
IRBuilder& irb);
-#elif defined(ART_USE_QUICK_COMPILER)
+#elif defined(ART_USE_PORTABLE_COMPILER)
llvm::FunctionPass*
CreateGBCExpanderPass(const greenland::IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
Compiler* compiler, OatCompilationUnit* oat_compilation_unit);
@@ -166,7 +166,7 @@
CompilationUnit::CompilationUnit(const CompilerLLVM* compiler_llvm,
size_t cunit_idx)
: compiler_llvm_(compiler_llvm), cunit_idx_(cunit_idx) {
-#if !defined(ART_USE_QUICK_COMPILER)
+#if !defined(ART_USE_PORTABLE_COMPILER)
context_.reset(new llvm::LLVMContext());
module_ = new llvm::Module("art", *context_);
#else
@@ -210,7 +210,7 @@
CompilationUnit::~CompilationUnit() {
#if defined(ART_USE_DEXLANG_FRONTEND)
delete dex_lang_ctx_;
-#elif defined(ART_USE_QUICK_COMPILER)
+#elif defined(ART_USE_PORTABLE_COMPILER)
llvm::LLVMContext* llvm_context = context_.release(); // Managed by llvm_info_
CHECK(llvm_context != NULL);
#endif
@@ -330,7 +330,7 @@
// regular FunctionPass.
#if defined(ART_USE_DEXLANG_FRONTEND)
fpm.add(CreateGBCExpanderPass(dex_lang_ctx_->GetIntrinsicHelper(), *irb_.get()));
-#elif defined(ART_USE_QUICK_COMPILER)
+#elif defined(ART_USE_PORTABLE_COMPILER)
fpm.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
compiler_, oat_compilation_unit_));
#endif
@@ -340,7 +340,7 @@
llvm::FunctionPassManager fpm2(module_);
#if defined(ART_USE_DEXLANG_FRONTEND)
fpm2.add(CreateGBCExpanderPass(dex_lang_ctx_->GetIntrinsicHelper(), *irb_.get()));
-#elif defined(ART_USE_QUICK_COMPILER)
+#elif defined(ART_USE_PORTABLE_COMPILER)
fpm2.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
compiler_, oat_compilation_unit_));
#endif
diff --git a/src/compiler_llvm/compilation_unit.h b/src/compiler_llvm/compilation_unit.h
index 6ad7ee1..0b40388 100644
--- a/src/compiler_llvm/compilation_unit.h
+++ b/src/compiler_llvm/compilation_unit.h
@@ -28,7 +28,7 @@
#include "runtime_support_func.h"
#include "safe_map.h"
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
# include "compiler/Dalvik.h"
# include "compiler.h"
# include "oat_compilation_unit.h"
@@ -90,7 +90,7 @@
bitcode_filename_ = bitcode_filename;
}
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
LLVMInfo* GetQuickContext() const {
return llvm_info_.get();
}
@@ -124,7 +124,7 @@
#if defined(ART_USE_DEXLANG_FRONTEND)
greenland::DexLang::Context* dex_lang_ctx_;
#endif
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
UniquePtr<LLVMInfo> llvm_info_;
Compiler* compiler_;
OatCompilationUnit* oat_compilation_unit_;
diff --git a/src/compiler_llvm/compiler_llvm.cc b/src/compiler_llvm/compiler_llvm.cc
index a964b40..aa5ec82 100644
--- a/src/compiler_llvm/compiler_llvm.cc
+++ b/src/compiler_llvm/compiler_llvm.cc
@@ -38,14 +38,15 @@
#include <llvm/Support/TargetSelect.h>
#include <llvm/Support/Threading.h>
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
namespace art {
-void oatCompileMethodToGBC(Compiler& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file,
- LLVMInfo* llvm_info);
+CompiledMethod* oatCompileMethod(Compiler& compiler,
+ const CompilerBackend compilerBackend,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, InvokeType invoke_type,
+ uint32_t method_idx, jobject class_loader,
+ const DexFile& dex_file,
+ LLVMInfo* llvm_info);
}
#endif
@@ -152,7 +153,7 @@
return new CompiledMethod(cunit->GetInstructionSet(),
cunit->GetCompiledCode());
-#elif defined(ART_USE_QUICK_COMPILER)
+#elif defined(ART_USE_PORTABLE_COMPILER)
std::string methodName(PrettyMethod(oat_compilation_unit->GetDexMethodIndex(),
*oat_compilation_unit->GetDexFile()));
if (insn_set_ == kX86) {
@@ -162,16 +163,17 @@
return method_compiler->Compile();
} else {
- // Use quick
- oatCompileMethodToGBC(*compiler_,
- oat_compilation_unit->GetCodeItem(),
- oat_compilation_unit->access_flags_,
- invoke_type,
- oat_compilation_unit->GetDexMethodIndex(),
- oat_compilation_unit->GetClassLoader(),
- *oat_compilation_unit->GetDexFile(),
- cunit->GetQuickContext()
- );
+ // TODO: consolidate ArtCompileMethods
+ oatCompileMethod(*compiler_,
+ kPortable,
+ oat_compilation_unit->GetCodeItem(),
+ oat_compilation_unit->access_flags_,
+ invoke_type,
+ oat_compilation_unit->GetDexMethodIndex(),
+ oat_compilation_unit->GetClassLoader(),
+ *oat_compilation_unit->GetDexFile(),
+ cunit->GetQuickContext()
+ );
cunit->SetCompiler(compiler_);
cunit->SetOatCompilationUnit(oat_compilation_unit);
diff --git a/src/compiler_llvm/compiler_llvm.h b/src/compiler_llvm/compiler_llvm.h
index 39223ef..0867e56 100644
--- a/src/compiler_llvm/compiler_llvm.h
+++ b/src/compiler_llvm/compiler_llvm.h
@@ -77,7 +77,7 @@
CompiledMethod* CompileDexMethod(OatCompilationUnit* oat_compilation_unit,
InvokeType invoke_type);
-#if defined(ART_USE_LLVM_COMPILER) && defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
CompiledMethod* CompileGBCMethod(OatCompilationUnit* oat_compilation_unit, std::string* func);
#endif
diff --git a/src/compiler_llvm/gbc_expander.cc b/src/compiler_llvm/gbc_expander.cc
index 484dd77..18cef41 100644
--- a/src/compiler_llvm/gbc_expander.cc
+++ b/src/compiler_llvm/gbc_expander.cc
@@ -370,7 +370,7 @@
func_ = &func;
changed_ = false; // Assume unchanged
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
basic_blocks_.resize(code_item_->insns_size_in_code_units_);
basic_block_landing_pads_.resize(code_item_->tries_size_, NULL);
basic_block_unwind_ = NULL;
@@ -1032,7 +1032,7 @@
bool is_div, JType op_jty) {
llvm::Value* dividend = call_inst.getArgOperand(0);
llvm::Value* divisor = call_inst.getArgOperand(1);
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
EmitGuard_DivZeroException(dex_pc, divisor, op_jty);
#endif
@@ -1145,7 +1145,7 @@
};
llvm::Value* entry_addr = irb_.CreateGEP(shadow_frame_, gep_index);
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
if (obj->getType() != irb_.getJObjectTy()) {
obj = irb_.getJNull();
}
@@ -1155,7 +1155,7 @@
}
void GBCExpanderPass::Expand_PopShadowFrame() {
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
if (old_shadow_frame_ == NULL) {
return;
}
@@ -1191,7 +1191,7 @@
// alloca instructions)
EmitStackOverflowCheck(&*first_non_alloca);
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
irb_.Runtime().EmitTestSuspend();
#endif
@@ -2385,7 +2385,7 @@
}
void GBCExpanderPass::EmitUpdateDexPC(uint32_t dex_pc) {
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
if (shadow_frame_ == NULL) {
return;
}
@@ -2477,7 +2477,7 @@
// Get return type
char ret_shorty = shorty[0];
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
ret_shorty = art::remapShorty(ret_shorty);
#endif
llvm::Type* ret_type = irb_.getJType(ret_shorty, kAccurate);
@@ -2492,7 +2492,7 @@
}
for (uint32_t i = 1; i < shorty_size; ++i) {
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
char shorty_type = art::remapShorty(shorty[i]);
args_type.push_back(irb_.getJType(shorty_type, kAccurate));
#else
@@ -2627,7 +2627,7 @@
// Emit the code to return default value (zero) for the given return type.
char ret_shorty = oat_compilation_unit_->GetShorty()[0];
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
ret_shorty = art::remapShorty(ret_shorty);
#endif
if (ret_shorty == 'V') {
diff --git a/src/compiler_llvm/runtime_support_llvm.cc b/src/compiler_llvm/runtime_support_llvm.cc
index 668712f..0e0500c 100644
--- a/src/compiler_llvm/runtime_support_llvm.cc
+++ b/src/compiler_llvm/runtime_support_llvm.cc
@@ -86,7 +86,16 @@
void art_test_suspend_from_code(Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- thread->FullSuspendCheck();
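+ // Service any pending checkpoint request first, then suspend if requested; loop until neither flag is set.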
+ for (;;) {
+ if (thread->ReadFlag(kCheckpointRequest)) {
+ thread->RunCheckpointFunction();
+ thread->AtomicClearFlag(kCheckpointRequest);
+ } else if (thread->ReadFlag(kSuspendRequest)) {
+ thread->FullSuspendCheck();
+ } else {
+ break;
+ }
+ }
}
ShadowFrame* art_push_shadow_frame_from_code(Thread* thread, ShadowFrame* new_shadow_frame,
@@ -367,13 +376,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
- field->Set32(NULL, new_value);
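+ // Static fields live on the declaring class, so pass it as the receiver rather than NULL.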
+ field->Set32(field->GetDeclaringClass(), new_value);
return 0;
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticPrimitiveWrite, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
- field->Set32(NULL, new_value);
+ field->Set32(field->GetDeclaringClass(), new_value);
return 0;
}
return -1;
@@ -383,13 +392,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
- field->Set64(NULL, new_value);
+ field->Set64(field->GetDeclaringClass(), new_value);
return 0;
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticPrimitiveWrite, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
- field->Set64(NULL, new_value);
+ field->Set64(field->GetDeclaringClass(), new_value);
return 0;
}
return -1;
@@ -399,13 +408,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
if (LIKELY(field != NULL)) {
- field->SetObj(NULL, new_value);
+ field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticObjectWrite, sizeof(Object*));
if (LIKELY(field != NULL)) {
- field->SetObj(NULL, new_value);
+ field->SetObj(field->GetDeclaringClass(), new_value);
return 0;
}
return -1;
@@ -415,12 +424,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
- return field->Get32(NULL);
+ return field->Get32(field->GetDeclaringClass());
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticPrimitiveRead, sizeof(uint32_t));
if (LIKELY(field != NULL)) {
- return field->Get32(NULL);
+ return field->Get32(field->GetDeclaringClass());
}
return 0;
}
@@ -429,12 +438,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
- return field->Get64(NULL);
+ return field->Get64(field->GetDeclaringClass());
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticPrimitiveRead, sizeof(uint64_t));
if (LIKELY(field != NULL)) {
- return field->Get64(NULL);
+ return field->Get64(field->GetDeclaringClass());
}
return 0;
}
@@ -443,12 +452,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
if (LIKELY(field != NULL)) {
- return field->GetObj(NULL);
+ return field->GetObj(field->GetDeclaringClass());
}
field = FindFieldFromCode(field_idx, referrer, art_get_current_thread_from_code(),
StaticObjectRead, sizeof(Object*));
if (LIKELY(field != NULL)) {
- return field->GetObj(NULL);
+ return field->GetObj(field->GetDeclaringClass());
}
return 0;
}
diff --git a/src/compiler_llvm/stub_compiler.cc b/src/compiler_llvm/stub_compiler.cc
index 3ac5f2a..4854c9f 100644
--- a/src/compiler_llvm/stub_compiler.cc
+++ b/src/compiler_llvm/stub_compiler.cc
@@ -151,7 +151,7 @@
llvm::Value* code_addr = irb_.CreateLoad(code_field_addr, kTBAAJRuntime);
llvm::CallInst* retval = irb_.CreateCall(code_addr, args);
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
for (size_t i = 1; i < shorty_size; ++i) {
switch(shorty[i]) {
case 'Z':
@@ -216,7 +216,7 @@
llvm::Function* func =
llvm::Function::Create(accurate_func_type, llvm::Function::ExternalLinkage,
func_name, module_);
-#if defined(ART_USE_QUICK_COMPILER)
+#if defined(ART_USE_PORTABLE_COMPILER)
switch(shorty[0]) {
case 'Z':
case 'C':
diff --git a/src/debugger.cc b/src/debugger.cc
index a34f690..7de675c 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -1172,7 +1172,7 @@
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
std::string shorty(mh.GetShorty());
- expandBufAdd4BE(pReply, m->NumArgRegisters(shorty));
+ expandBufAdd4BE(pReply, AbstractMethod::NumArgRegisters(shorty));
// We don't know the total number of variables yet, so leave a blank and update it later.
size_t variable_count_offset = expandBufGetLength(pReply);
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index 20872e1..a8f42af 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -83,11 +83,9 @@
UsageError(" to the file descriptor specified by --oat-fd.");
UsageError(" Example: --oat-location=/data/art-cache/system@app@Calculator.apk.oat");
UsageError("");
-#if defined(ART_USE_LLVM_COMPILER)
UsageError(" --bitcode=<file.bc>: specifies the optional bitcode filename.");
UsageError(" Example: --bitcode=/system/framework/boot.bc");
UsageError("");
-#endif
UsageError(" --image=<file.art>: specifies the output image filename.");
UsageError(" Example: --image=/system/framework/boot.art");
UsageError("");
@@ -111,6 +109,10 @@
UsageError(" Example: --instruction-set=x86");
UsageError(" Default: arm");
UsageError("");
+ UsageError(" --compiler-backend=(Quick|QuickGBC|Portable): select compiler backend");
+ UsageError(" set.");
+ UsageError(" Example: --instruction-set=Portable");
+ UsageError(" Default: Quick");
UsageError(" --runtime-arg <argument>: used to specify various arguments for the runtime,");
UsageError(" such as initial heap size, maximum heap size, and verbose output.");
UsageError(" Use a separate --runtime-arg switch for each argument.");
@@ -122,14 +124,15 @@
class Dex2Oat {
public:
- static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, InstructionSet instruction_set,
- size_t thread_count, bool support_debugging)
+ static bool Create(Dex2Oat** p_dex2oat, Runtime::Options& options, CompilerBackend compiler_backend,
+ InstructionSet instruction_set, size_t thread_count, bool support_debugging)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_) {
if (!CreateRuntime(options, instruction_set)) {
*p_dex2oat = NULL;
return false;
}
- *p_dex2oat = new Dex2Oat(Runtime::Current(), instruction_set, thread_count, support_debugging);
+ *p_dex2oat = new Dex2Oat(Runtime::Current(), compiler_backend, instruction_set, thread_count,
+ support_debugging);
return true;
}
@@ -209,9 +212,7 @@
const std::string* host_prefix,
const std::vector<const DexFile*>& dex_files,
File* oat_file,
-#if defined(ART_USE_LLVM_COMPILER)
const std::string& bitcode_filename,
-#endif
bool image,
const std::set<std::string>* image_classes,
bool dump_stats,
@@ -234,7 +235,8 @@
Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files);
}
- UniquePtr<Compiler> compiler(new Compiler(instruction_set_,
+ UniquePtr<Compiler> compiler(new Compiler(compiler_backend_,
+ instruction_set_,
image,
thread_count_,
support_debugging_,
@@ -242,9 +244,9 @@
dump_stats,
dump_timings));
-#if defined(ART_USE_LLVM_COMPILER)
- compiler->SetBitcodeFileName(bitcode_filename);
-#endif
+ if ((compiler_backend_ == kPortable) || (compiler_backend_ == kIceland)) {
+ compiler->SetBitcodeFileName(bitcode_filename);
+ }
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
@@ -295,9 +297,10 @@
}
private:
- explicit Dex2Oat(Runtime* runtime, InstructionSet instruction_set, size_t thread_count,
- bool support_debugging)
- : instruction_set_(instruction_set),
+ explicit Dex2Oat(Runtime* runtime, CompilerBackend compiler_backend, InstructionSet instruction_set,
+ size_t thread_count, bool support_debugging)
+ : compiler_backend_(compiler_backend),
+ instruction_set_(instruction_set),
runtime_(runtime),
thread_count_(thread_count),
support_debugging_(support_debugging),
@@ -431,6 +434,8 @@
return false;
}
+ const CompilerBackend compiler_backend_;
+
const InstructionSet instruction_set_;
Runtime* runtime_;
@@ -487,9 +492,7 @@
std::string oat_filename;
std::string oat_location;
int oat_fd = -1;
-#if defined(ART_USE_LLVM_COMPILER)
std::string bitcode_filename;
-#endif
const char* image_classes_filename = NULL;
std::string image_filename;
std::string boot_image_filename;
@@ -498,6 +501,13 @@
std::vector<const char*> runtime_args;
int thread_count = sysconf(_SC_NPROCESSORS_CONF);
bool support_debugging = false;
+#if defined(ART_USE_PORTABLE_COMPILER)
+ CompilerBackend compiler_backend = kPortable;
+#elif defined(ART_USE_LLVM_COMPILER)
+ CompilerBackend compiler_backend = kIceland;
+#else
+ CompilerBackend compiler_backend = kQuick;
+#endif
#if defined(__arm__)
InstructionSet instruction_set = kThumb2;
#elif defined(__i386__)
@@ -543,10 +553,8 @@
}
} else if (option.starts_with("--oat-location=")) {
oat_location = option.substr(strlen("--oat-location=")).data();
-#if defined(ART_USE_LLVM_COMPILER)
} else if (option.starts_with("--bitcode=")) {
bitcode_filename = option.substr(strlen("--bitcode=")).data();
-#endif
} else if (option.starts_with("--image=")) {
image_filename = option.substr(strlen("--image=")).data();
} else if (option.starts_with("--image-classes=")) {
@@ -571,6 +579,18 @@
} else if (instruction_set_str == "x86") {
instruction_set = kX86;
}
+ } else if (option.starts_with("--compiler-backend=")) {
+ StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
+ if (backend_str == "Quick") {
+ compiler_backend = kQuick;
+ } else if (backend_str == "QuickGBC") {
+ compiler_backend = kQuickGBC;
+ } else if (backend_str == "Iceland") {
+ // TODO: remove this when Portable/Iceland merge complete
+ compiler_backend = kIceland;
+ } else if (backend_str == "Portable") {
+ compiler_backend = kPortable;
+ }
} else if (option == "--runtime-arg") {
if (++i >= argc) {
Usage("Missing required argument for --runtime-arg");
@@ -704,7 +724,7 @@
}
Dex2Oat* p_dex2oat;
- if (!Dex2Oat::Create(&p_dex2oat, options, instruction_set, thread_count, support_debugging)) {
+ if (!Dex2Oat::Create(&p_dex2oat, options, compiler_backend, instruction_set, thread_count, support_debugging)) {
LOG(ERROR) << "Failed to create dex2oat";
return EXIT_FAILURE;
}
@@ -755,9 +775,7 @@
host_prefix.get(),
dex_files,
oat_file.get(),
-#if defined(ART_USE_LLVM_COMPILER)
bitcode_filename,
-#endif
image,
image_classes.get(),
dump_stats,
diff --git a/src/dex_file.cc b/src/dex_file.cc
index c433f3d..b7cefe3 100644
--- a/src/dex_file.cc
+++ b/src/dex_file.cc
@@ -978,18 +978,18 @@
void EncodedStaticFieldValueIterator::ReadValueToField(Field* field) const {
switch (type_) {
- case kBoolean: field->SetBoolean(NULL, jval_.z); break;
- case kByte: field->SetByte(NULL, jval_.b); break;
- case kShort: field->SetShort(NULL, jval_.s); break;
- case kChar: field->SetChar(NULL, jval_.c); break;
- case kInt: field->SetInt(NULL, jval_.i); break;
- case kLong: field->SetLong(NULL, jval_.j); break;
- case kFloat: field->SetFloat(NULL, jval_.f); break;
- case kDouble: field->SetDouble(NULL, jval_.d); break;
- case kNull: field->SetObject(NULL, NULL); break;
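+ // Encoded static values are written with the declaring class as the receiver rather than NULL.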
+ case kBoolean: field->SetBoolean(field->GetDeclaringClass(), jval_.z); break;
+ case kByte: field->SetByte(field->GetDeclaringClass(), jval_.b); break;
+ case kShort: field->SetShort(field->GetDeclaringClass(), jval_.s); break;
+ case kChar: field->SetChar(field->GetDeclaringClass(), jval_.c); break;
+ case kInt: field->SetInt(field->GetDeclaringClass(), jval_.i); break;
+ case kLong: field->SetLong(field->GetDeclaringClass(), jval_.j); break;
+ case kFloat: field->SetFloat(field->GetDeclaringClass(), jval_.f); break;
+ case kDouble: field->SetDouble(field->GetDeclaringClass(), jval_.d); break;
+ case kNull: field->SetObject(field->GetDeclaringClass(), NULL); break;
case kString: {
String* resolved = linker_->ResolveString(dex_file_, jval_.i, dex_cache_);
- field->SetObject(NULL, resolved);
+ field->SetObject(field->GetDeclaringClass(), resolved);
break;
}
case kType: {
diff --git a/src/dex_instruction.h b/src/dex_instruction.h
index 90dca69..486bbf5 100644
--- a/src/dex_instruction.h
+++ b/src/dex_instruction.h
@@ -253,6 +253,11 @@
kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgRange | kVerifyError));
}
+ // Get the dex PC of this instruction as an offset in code units from the beginning of insns.
+ uint32_t GetDexPc(const uint16_t* insns) const {
+ return (reinterpret_cast<const uint16_t*>(this) - insns);
+ }
+
// Dump decoded version of instruction
std::string DumpString(const DexFile*) const;
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 0869e26..e93eb1a 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -527,7 +527,7 @@
Thread* self;
};
-class CheckpointMarkThreadRoots : public Thread::CheckpointFunction {
+class CheckpointMarkThreadRoots : public Closure {
public:
CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
@@ -536,7 +536,8 @@
virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
- DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc);
+ DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
thread->VisitRoots(MarkSweep::MarkObjectVisitor, mark_sweep_);
mark_sweep_->GetBarrier().Pass(self);
diff --git a/src/greenland/ir_builder.h b/src/greenland/ir_builder.h
index baa0ae7..ba8e5e1 100644
--- a/src/greenland/ir_builder.h
+++ b/src/greenland/ir_builder.h
@@ -33,7 +33,6 @@
namespace art {
namespace greenland {
-#if defined(ART_USE_QUICK_COMPILER)
class InserterWithDexOffset
: public llvm::IRBuilderDefaultInserter<true> {
public:
@@ -54,9 +53,6 @@
};
typedef llvm::IRBuilder<true, llvm::NoFolder, InserterWithDexOffset> LLVMIRBuilder;
-#else
-typedef llvm::IRBuilder<true> LLVMIRBuilder;
-#endif
class IRBuilder : public LLVMIRBuilder {
public:
diff --git a/src/image_test.cc b/src/image_test.cc
index afccb4a..e2abbac 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -72,6 +72,9 @@
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->Length()));
}
+ // Need to delete the compiler since it has worker threads which are attached to the runtime.
+ delete compiler_.release();
+
// tear down old runtime before making a new one, clearing out misc state
delete runtime_.release();
java_lang_dex_file_ = NULL;
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
new file mode 100644
index 0000000..550c6ee
--- /dev/null
+++ b/src/interpreter/interpreter.cc
@@ -0,0 +1,1500 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter.h"
+
+#include <math.h>
+
+#include "common_throws.h"
+#include "dex_instruction.h"
+#include "invoke_arg_array_builder.h"
+#include "logging.h"
+#include "object.h"
+#include "object_utils.h"
+#include "runtime_support.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+namespace interpreter {
+
+static void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorEnter(self);
+}
+
+static void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+ ref->MonitorExit(self);
+}
+
+static void DoInvoke(Thread* self, MethodHelper& mh, ShadowFrame& shadow_frame,
+ const DecodedInstruction& dec_insn, InvokeType type, bool is_range,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Object* receiver;
+ if (type == kStatic) {
+ receiver = NULL;
+ } else {
+ receiver = shadow_frame.GetReference(dec_insn.vC);
+ if (UNLIKELY(receiver == NULL)) {
+ ThrowNullPointerExceptionForMethodAccess(shadow_frame.GetMethod(), dec_insn.vB, type);
+ result->SetJ(0);
+ return;
+ }
+ }
+ uint32_t method_idx = dec_insn.vB;
+ AbstractMethod* target_method = FindMethodFromCode(method_idx, receiver,
+ shadow_frame.GetMethod(), self, true,
+ type);
+ if (UNLIKELY(target_method == NULL)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return;
+ }
+ mh.ChangeMethod(target_method);
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
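+ // For instance invokes the first argument register holds the receiver, so argument copying starts one slot later.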
+ if (is_range) {
+ arg_array.BuildArgArray(shadow_frame, dec_insn.vC + (type != kStatic ? 1 : 0));
+ } else {
+ arg_array.BuildArgArray(shadow_frame, dec_insn.arg + (type != kStatic ? 1 : 0));
+ }
+ target_method->Invoke(self, receiver, arg_array.get(), result);
+ if (!mh.GetReturnType()->IsPrimitive() && result->GetL() != NULL) {
+ CHECK(mh.GetReturnType()->IsAssignableFrom(result->GetL()->GetClass()));
+ }
+ mh.ChangeMethod(shadow_frame.GetMethod());
+}
+
+static void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+ const DecodedInstruction& dec_insn, FindFieldType find_type,
+ Primitive::Type field_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
+ uint32_t field_idx = is_static ? dec_insn.vB : dec_insn.vC;
+ Field* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type));
+ if (LIKELY(f != NULL)) {
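+ // Static fields use the declaring class as the receiver; instance fields take the object from vB (null-checked).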
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetReference(dec_insn.vB);
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(f, true);
+ }
+ }
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ shadow_frame.SetVReg(dec_insn.vA, f->GetBoolean(obj));
+ break;
+ case Primitive::kPrimByte:
+ shadow_frame.SetVReg(dec_insn.vA, f->GetByte(obj));
+ break;
+ case Primitive::kPrimChar:
+ shadow_frame.SetVReg(dec_insn.vA, f->GetChar(obj));
+ break;
+ case Primitive::kPrimShort:
+ shadow_frame.SetVReg(dec_insn.vA, f->GetShort(obj));
+ break;
+ case Primitive::kPrimInt:
+ shadow_frame.SetVReg(dec_insn.vA, f->GetInt(obj));
+ break;
+ case Primitive::kPrimLong:
+ shadow_frame.SetVRegLong(dec_insn.vA, f->GetLong(obj));
+ break;
+ case Primitive::kPrimNot:
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, f->GetObject(obj));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ }
+}
+
+static void DoFieldPut(Thread* self, ShadowFrame& shadow_frame,
+ const DecodedInstruction& dec_insn, FindFieldType find_type,
+ Primitive::Type field_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
+ uint32_t field_idx = is_static ? dec_insn.vB : dec_insn.vC;
+ Field* f = FindFieldFromCode(field_idx, shadow_frame.GetMethod(), self,
+ find_type, Primitive::FieldSize(field_type));
+ if (LIKELY(f != NULL)) {
+ Object* obj;
+ if (is_static) {
+ obj = f->GetDeclaringClass();
+ } else {
+ obj = shadow_frame.GetReference(dec_insn.vB);
+ if (UNLIKELY(obj == NULL)) {
+ ThrowNullPointerExceptionForFieldAccess(f, false);
+ }
+ }
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ f->SetBoolean(obj, shadow_frame.GetVReg(dec_insn.vA));
+ break;
+ case Primitive::kPrimByte:
+ f->SetByte(obj, shadow_frame.GetVReg(dec_insn.vA));
+ shadow_frame.SetVReg(dec_insn.vA, f->GetByte(obj));
+ break;
+ case Primitive::kPrimChar:
+ f->SetChar(obj, shadow_frame.GetVReg(dec_insn.vA));
+ shadow_frame.SetVReg(dec_insn.vA, f->GetChar(obj));
+ break;
+ case Primitive::kPrimShort:
+ f->SetShort(obj, shadow_frame.GetVReg(dec_insn.vA));
+ shadow_frame.SetVReg(dec_insn.vA, f->GetShort(obj));
+ break;
+ case Primitive::kPrimInt:
+ f->SetInt(obj, shadow_frame.GetVReg(dec_insn.vA));
+ shadow_frame.SetVReg(dec_insn.vA, f->GetInt(obj));
+ break;
+ case Primitive::kPrimLong:
+ f->SetLong(obj, shadow_frame.GetVRegLong(dec_insn.vA));
+ shadow_frame.SetVRegLong(dec_insn.vA, f->GetLong(obj));
+ break;
+ case Primitive::kPrimNot:
+ f->SetObj(obj, shadow_frame.GetReference(dec_insn.vA));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ }
+ }
+}
+
+static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const uint16_t* insns = code_item->insns_;
+ const Instruction* inst = Instruction::At(insns + shadow_frame.GetDexPC());
+ JValue result_register;
+ while (true) {
+ shadow_frame.SetDexPC(inst->GetDexPc(insns));
+ DecodedInstruction dec_insn(inst);
+ const bool kTracing = true;
+ if (kTracing) {
+ LOG(INFO) << PrettyMethod(shadow_frame.GetMethod())
+ << StringPrintf("\n0x%x: %s\nReferences:",
+ inst->GetDexPc(insns), inst->DumpString(&mh.GetDexFile()).c_str());
+ for (size_t i = 0; i < shadow_frame.NumberOfReferences(); ++i) {
+ Object* o = shadow_frame.GetReference(i);
+ if (o != NULL) {
+ if (o->GetClass()->IsStringClass() && o->AsString()->GetCharArray() != NULL) {
+ LOG(INFO) << i << ": java.lang.String " << static_cast<void*>(o)
+ << " \"" << o->AsString()->ToModifiedUtf8() << "\"";
+ } else {
+ LOG(INFO) << i << ": " << PrettyTypeOf(o) << " " << static_cast<void*>(o);
+ }
+ } else {
+ LOG(INFO) << i << ": null";
+ }
+ }
+ LOG(INFO) << "vregs:";
+ for (size_t i = 0; i < shadow_frame.NumberOfReferences(); ++i) {
+ LOG(INFO) << StringPrintf("%d: %08x", i, shadow_frame.GetVReg(i));
+ }
+ }
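+ // Default to falling through; branch opcodes below overwrite next_inst with their computed target.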
+ const Instruction* next_inst = inst->Next();
+ switch (dec_insn.opcode) {
+ case Instruction::NOP:
+ break;
+ case Instruction::MOVE:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_16:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_FROM16:
+ case Instruction::MOVE_WIDE_16:
+ shadow_frame.SetVRegLong(dec_insn.vA, shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_OBJECT_16:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB));
+ shadow_frame.SetReference(dec_insn.vA, shadow_frame.GetReference(dec_insn.vB));
+ break;
+ case Instruction::MOVE_RESULT:
+ shadow_frame.SetVReg(dec_insn.vA, result_register.GetI());
+ break;
+ case Instruction::MOVE_RESULT_WIDE:
+ shadow_frame.SetVRegLong(dec_insn.vA, result_register.GetJ());
+ break;
+ case Instruction::MOVE_RESULT_OBJECT:
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, result_register.GetL());
+ break;
+ case Instruction::MOVE_EXCEPTION: {
+ Throwable* exception = self->GetException();
+ self->ClearException();
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, exception);
+ break;
+ }
+ case Instruction::RETURN_VOID: {
+ JValue result;
+ result.SetJ(0);
+ return result;
+ }
+ case Instruction::RETURN: {
+ JValue result;
+ result.SetJ(0);
+ result.SetI(shadow_frame.GetVReg(dec_insn.vA));
+ return result;
+ }
+ case Instruction::RETURN_WIDE: {
+ JValue result;
+ result.SetJ(shadow_frame.GetVRegLong(dec_insn.vA));
+ return result;
+ }
+ case Instruction::RETURN_OBJECT: {
+ JValue result;
+ result.SetJ(0);
+ result.SetL(shadow_frame.GetReference(dec_insn.vA));
+ return result;
+ }
+ case Instruction::CONST_4: {
+ int32_t val = static_cast<int32_t>(dec_insn.vB << 28) >> 28; // sign-extend the 4-bit literal
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ if (val == 0) {
+ shadow_frame.SetReference(dec_insn.vA, NULL);
+ }
+ break;
+ }
+ case Instruction::CONST_16: {
+ int32_t val = static_cast<int16_t>(dec_insn.vB);
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ if (val == 0) {
+ shadow_frame.SetReference(dec_insn.vA, NULL);
+ }
+ break;
+ }
+ case Instruction::CONST: {
+ int32_t val = dec_insn.vB;
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ if (val == 0) {
+ shadow_frame.SetReference(dec_insn.vA, NULL);
+ }
+ break;
+ }
+ case Instruction::CONST_HIGH16: {
+ int32_t val = dec_insn.vB << 16;
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ if (val == 0) {
+ shadow_frame.SetReference(dec_insn.vA, NULL);
+ }
+ break;
+ }
+ case Instruction::CONST_WIDE_16: {
+ int64_t val = static_cast<int16_t>(dec_insn.vB);
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ shadow_frame.SetVReg(dec_insn.vA + 1, val >> 32);
+ break;
+ }
+ case Instruction::CONST_WIDE_32: {
+ int64_t val = static_cast<int32_t>(dec_insn.vB);
+ shadow_frame.SetVReg(dec_insn.vA, val);
+ shadow_frame.SetVReg(dec_insn.vA + 1, val >> 32);
+ break;
+ }
+ case Instruction::CONST_WIDE:
+ shadow_frame.SetVReg(dec_insn.vA, dec_insn.vB_wide);
+ shadow_frame.SetVReg(dec_insn.vA + 1, dec_insn.vB_wide >> 32);
+ break;
+ case Instruction::CONST_WIDE_HIGH16:
+ shadow_frame.SetVRegLong(dec_insn.vA, static_cast<uint64_t>(dec_insn.vB) << 48);
+ break;
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO: {
+ if (UNLIKELY(!String::GetJavaLangString()->IsInitialized())) {
+ Runtime::Current()->GetClassLinker()->EnsureInitialized(String::GetJavaLangString(),
+ true, true);
+ }
+ String* s = mh.ResolveString(dec_insn.vB);
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, s);
+ break;
+ }
+ case Instruction::CONST_CLASS:
+ shadow_frame.SetReference(dec_insn.vA, mh.ResolveClass(dec_insn.vB));
+ break;
+ case Instruction::MONITOR_ENTER:
+ DoMonitorEnter(self, shadow_frame.GetReference(dec_insn.vA));
+ break;
+ case Instruction::MONITOR_EXIT:
+ DoMonitorExit(self, shadow_frame.GetReference(dec_insn.vA));
+ break;
+ case Instruction::CHECK_CAST: {
+ Class* c = mh.ResolveClass(dec_insn.vB);
+ Object* obj = shadow_frame.GetReference(dec_insn.vA);
+ if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ self->ThrowNewExceptionF("Ljava/lang/ClassCastException;",
+ "%s cannot be cast to %s",
+ PrettyDescriptor(obj->GetClass()).c_str(),
+ PrettyDescriptor(c).c_str());
+ }
+ break;
+ }
+ case Instruction::INSTANCE_OF: {
+ Class* c = mh.ResolveClass(dec_insn.vC);
+ Object* obj = shadow_frame.GetReference(dec_insn.vB);
+ shadow_frame.SetVReg(dec_insn.vA, (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ break;
+ }
+ case Instruction::ARRAY_LENGTH: {
+ Array* array = shadow_frame.GetReference(dec_insn.vB)->AsArray();
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, array->GetLength());
+ break;
+ }
+ case Instruction::NEW_INSTANCE: {
+ Object* obj = AllocObjectFromCode(dec_insn.vB, shadow_frame.GetMethod(), self, true);
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, obj);
+ break;
+ }
+ case Instruction::NEW_ARRAY: {
+ int32_t length = shadow_frame.GetVReg(dec_insn.vB);
+ Object* obj = AllocArrayFromCode(dec_insn.vC, shadow_frame.GetMethod(), length, self, true);
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, obj);
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ UNIMPLEMENTED(FATAL) << inst->DumpString(&mh.GetDexFile());
+ break;
+ case Instruction::CMPL_FLOAT: {
+ float val1 = shadow_frame.GetVRegFloat(dec_insn.vB);
+ float val2 = shadow_frame.GetVRegFloat(dec_insn.vC);
+ int32_t result;
+ if (val1 == val2) {
+ result = 0;
+ } else if (val1 > val2) {
+ result = 1;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, result);
+ break;
+ }
+ case Instruction::CMPG_FLOAT: {
+ float val1 = shadow_frame.GetVRegFloat(dec_insn.vB);
+ float val2 = shadow_frame.GetVRegFloat(dec_insn.vC);
+ int32_t result;
+ if (val1 == val2) {
+ result = 0;
+ } else if (val1 < val2) {
+ result = -1;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, result);
+ break;
+ }
+ case Instruction::CMPL_DOUBLE: {
+ double val1 = shadow_frame.GetVRegDouble(dec_insn.vB);
+ double val2 = shadow_frame.GetVRegDouble(dec_insn.vC);
+ int32_t result;
+ if (val1 == val2) {
+ result = 0;
+ } else if (val1 > val2) {
+ result = 1;
+ } else {
+ result = -1;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, result);
+ break;
+ }
+
+ case Instruction::CMPG_DOUBLE: {
+ double val1 = shadow_frame.GetVRegDouble(dec_insn.vB);
+ double val2 = shadow_frame.GetVRegDouble(dec_insn.vC);
+ int32_t result;
+ if (val1 == val2) {
+ result = 0;
+ } else if (val1 < val2) {
+ result = -1;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, result);
+ break;
+ }
+ case Instruction::CMP_LONG: {
+ int64_t val1 = shadow_frame.GetVRegLong(dec_insn.vB);
+ int64_t val2 = shadow_frame.GetVRegLong(dec_insn.vC);
+ int32_t result;
+ if (val1 < val2) {
+ result = -1;
+ } else if (val1 == val2) {
+ result = 0;
+ } else {
+ result = 1;
+ }
+ shadow_frame.SetVReg(dec_insn.vA, result);
+ break;
+ }
+ case Instruction::THROW: {
+ Throwable* t = shadow_frame.GetReference(dec_insn.vA)->AsThrowable();
+ self->SetException(t);
+ break;
+ }
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32: {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vA);
+ break;
+ }
+ case Instruction::PACKED_SWITCH:
+ UNIMPLEMENTED(FATAL) << inst->DumpString(&mh.GetDexFile());
+ break;
+ case Instruction::SPARSE_SWITCH: {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ const uint16_t* switchData = insns + dex_pc + dec_insn.vB;
+ int32_t testVal = shadow_frame.GetVReg(dec_insn.vA);
+ CHECK_EQ(switchData[0], static_cast<uint16_t>(Instruction::kSparseSwitchSignature));
+ uint16_t size = switchData[1];
+ CHECK_GT(size, 0);
+ const int32_t* keys = reinterpret_cast<const int32_t*>(&switchData[2]);
+ CHECK(IsAligned<4>(keys));
+ const int32_t* entries = keys + size;
+ CHECK(IsAligned<4>(entries));
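+ // Binary search the sorted key table; if no key matches, execution falls through to the next instruction.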
+ int lo = 0;
+ int hi = size - 1;
+ while (lo <= hi) {
+ int mid = (lo + hi) / 2;
+ int32_t foundVal = keys[mid];
+ if (testVal < foundVal) {
+ hi = mid - 1;
+ } else if (testVal > foundVal) {
+ lo = mid + 1;
+ } else {
+ next_inst = Instruction::At(insns + dex_pc + entries[mid]);
+ break;
+ }
+ }
+ break;
+ }
+ case Instruction::FILL_ARRAY_DATA: {
+ Array* array = shadow_frame.GetReference(dec_insn.vA)->AsArray();
+ if (UNLIKELY(array == NULL)) {
+ Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;",
+ "null array in FILL_ARRAY_DATA");
+ break;
+ }
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + dex_pc + dec_insn.vB);
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count);
+ break;
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+ break;
+ }
+ case Instruction::IF_EQ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) == shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_NE: {
+ if (shadow_frame.GetVReg(dec_insn.vA) != shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_LT: {
+ if (shadow_frame.GetVReg(dec_insn.vA) < shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_GE: {
+ if (shadow_frame.GetVReg(dec_insn.vA) >= shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_GT: {
+ if (shadow_frame.GetVReg(dec_insn.vA) > shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_LE: {
+ if (shadow_frame.GetVReg(dec_insn.vA) <= shadow_frame.GetVReg(dec_insn.vB)) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vC);
+ }
+ break;
+ }
+ case Instruction::IF_EQZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) == 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::IF_NEZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) != 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::IF_LTZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) < 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::IF_GEZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) >= 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::IF_GTZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) > 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::IF_LEZ: {
+ if (shadow_frame.GetVReg(dec_insn.vA) <= 0) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ next_inst = Instruction::At(insns + dex_pc + dec_insn.vB);
+ }
+ break;
+ }
+ case Instruction::AGET_BOOLEAN: {
+ BooleanArray* a = shadow_frame.GetReference(dec_insn.vB)->AsBooleanArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET_BYTE: {
+ ByteArray* a = shadow_frame.GetReference(dec_insn.vB)->AsByteArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET_CHAR: {
+ CharArray* a = shadow_frame.GetReference(dec_insn.vB)->AsCharArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET_SHORT: {
+ ShortArray* a = shadow_frame.GetReference(dec_insn.vB)->AsShortArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET: {
+ IntArray* a = shadow_frame.GetReference(dec_insn.vB)->AsIntArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVReg(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET_WIDE: {
+ LongArray* a = shadow_frame.GetReference(dec_insn.vB)->AsLongArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ shadow_frame.SetVRegLong(dec_insn.vA, a->Get(index));
+ break;
+ }
+ case Instruction::AGET_OBJECT: {
+ ObjectArray<Object>* a = shadow_frame.GetReference(dec_insn.vB)->AsObjectArray<Object>();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ Object* o = a->Get(index);
+ shadow_frame.SetReferenceAndVReg(dec_insn.vA, o);
+ break;
+ }
+ case Instruction::APUT_BOOLEAN: {
+ uint8_t val = shadow_frame.GetVReg(dec_insn.vA);
+ BooleanArray* a = shadow_frame.GetReference(dec_insn.vB)->AsBooleanArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT_BYTE: {
+ int8_t val = shadow_frame.GetVReg(dec_insn.vA);
+ ByteArray* a = shadow_frame.GetReference(dec_insn.vB)->AsByteArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT_CHAR: {
+ uint16_t val = shadow_frame.GetVReg(dec_insn.vA);
+ CharArray* a = shadow_frame.GetReference(dec_insn.vB)->AsCharArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT_SHORT: {
+ int16_t val = shadow_frame.GetVReg(dec_insn.vA);
+ ShortArray* a = shadow_frame.GetReference(dec_insn.vB)->AsShortArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT: {
+ int32_t val = shadow_frame.GetVReg(dec_insn.vA);
+ IntArray* a = shadow_frame.GetReference(dec_insn.vB)->AsIntArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT_WIDE: {
+ int64_t val = shadow_frame.GetVRegLong(dec_insn.vA);
+ LongArray* a = shadow_frame.GetReference(dec_insn.vB)->AsLongArray();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::APUT_OBJECT: {
+ Object* val = shadow_frame.GetReference(dec_insn.vA);
+ ObjectArray<Object>* a = shadow_frame.GetReference(dec_insn.vB)->AsObjectArray<Object>();
+ if (UNLIKELY(a == NULL)) {
+ ThrowNullPointerExceptionFromDexPC(shadow_frame.GetMethod(), inst->GetDexPc(insns));
+ break;
+ }
+ int32_t index = shadow_frame.GetVReg(dec_insn.vC);
+ a->Set(index, val);
+ break;
+ }
+ case Instruction::IGET_BOOLEAN:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimBoolean);
+ break;
+ case Instruction::IGET_BYTE:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimByte);
+ break;
+ case Instruction::IGET_CHAR:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimChar);
+ break;
+ case Instruction::IGET_SHORT:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimShort);
+ break;
+ case Instruction::IGET:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimInt);
+ break;
+ case Instruction::IGET_WIDE:
+ DoFieldGet(self, shadow_frame, dec_insn, InstancePrimitiveRead, Primitive::kPrimLong);
+ break;
+ case Instruction::IGET_OBJECT:
+ DoFieldGet(self, shadow_frame, dec_insn, InstanceObjectRead, Primitive::kPrimNot);
+ break;
+ case Instruction::SGET_BOOLEAN:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimBoolean);
+ break;
+ case Instruction::SGET_BYTE:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimByte);
+ break;
+ case Instruction::SGET_CHAR:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimChar);
+ break;
+ case Instruction::SGET_SHORT:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimShort);
+ break;
+ case Instruction::SGET:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimInt);
+ break;
+ case Instruction::SGET_WIDE:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticPrimitiveRead, Primitive::kPrimLong);
+ break;
+ case Instruction::SGET_OBJECT:
+ DoFieldGet(self, shadow_frame, dec_insn, StaticObjectRead, Primitive::kPrimNot);
+ break;
+ case Instruction::IPUT_BOOLEAN:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimBoolean);
+ break;
+ case Instruction::IPUT_BYTE:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimByte);
+ break;
+ case Instruction::IPUT_CHAR:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimChar);
+ break;
+ case Instruction::IPUT_SHORT:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimShort);
+ break;
+ case Instruction::IPUT:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimInt);
+ break;
+ case Instruction::IPUT_WIDE:
+ DoFieldPut(self, shadow_frame, dec_insn, InstancePrimitiveWrite, Primitive::kPrimLong);
+ break;
+ case Instruction::IPUT_OBJECT:
+ DoFieldPut(self, shadow_frame, dec_insn, InstanceObjectWrite, Primitive::kPrimNot);
+ break;
+ case Instruction::SPUT_BOOLEAN:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimBoolean);
+ break;
+ case Instruction::SPUT_BYTE:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimByte);
+ break;
+ case Instruction::SPUT_CHAR:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimChar);
+ break;
+ case Instruction::SPUT_SHORT:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimShort);
+ break;
+ case Instruction::SPUT:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimInt);
+ break;
+ case Instruction::SPUT_WIDE:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticPrimitiveWrite, Primitive::kPrimLong);
+ break;
+ case Instruction::SPUT_OBJECT:
+ DoFieldPut(self, shadow_frame, dec_insn, StaticObjectWrite, Primitive::kPrimNot);
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kVirtual, false, &result_register);
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kVirtual, true, &result_register);
+ break;
+ case Instruction::INVOKE_SUPER:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kSuper, false, &result_register);
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kSuper, true, &result_register);
+ break;
+ case Instruction::INVOKE_DIRECT:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kDirect, false, &result_register);
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kDirect, true, &result_register);
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kInterface, false, &result_register);
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kInterface, true, &result_register);
+ break;
+ case Instruction::INVOKE_STATIC:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kStatic, false, &result_register);
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ DoInvoke(self, mh, shadow_frame, dec_insn, kStatic, true, &result_register);
+ break;
+ case Instruction::NEG_INT:
+ shadow_frame.SetVReg(dec_insn.vA, -shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::NOT_INT:
+ shadow_frame.SetVReg(dec_insn.vA, ~shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::NEG_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA, -shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::NOT_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA, ~shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::NEG_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA, -shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::NEG_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA, -shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::INT_TO_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::INT_TO_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::LONG_TO_INT:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::LONG_TO_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA, shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA, shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::FLOAT_TO_INT:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::FLOAT_TO_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA, shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA, shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::DOUBLE_TO_INT:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::DOUBLE_TO_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA, shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA, shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::INT_TO_BYTE:
+ shadow_frame.SetVReg(dec_insn.vA, static_cast<int8_t>(shadow_frame.GetVReg(dec_insn.vB)));
+ break;
+ case Instruction::INT_TO_CHAR:
+ shadow_frame.SetVReg(dec_insn.vA, static_cast<uint16_t>(shadow_frame.GetVReg(dec_insn.vB)));
+ break;
+ case Instruction::INT_TO_SHORT:
+ shadow_frame.SetVReg(dec_insn.vA, static_cast<int16_t>(shadow_frame.GetVReg(dec_insn.vB)));
+ break;
+ case Instruction::ADD_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) + shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::SUB_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) - shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::MUL_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) * shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::REM_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) % shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::DIV_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) / shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::SHL_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) << shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::SHR_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) >> shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::USHR_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vB)) >>
+ shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::AND_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) & shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::OR_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) | shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::XOR_INT:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vB) ^ shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::ADD_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) +
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::SUB_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) -
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::MUL_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) *
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::DIV_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) /
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::REM_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) %
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::AND_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) &
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::OR_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) |
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::XOR_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) ^
+ shadow_frame.GetVRegLong(dec_insn.vC));
+ break;
+ case Instruction::SHL_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) <<
+ shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::SHR_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vB) >>
+ shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::USHR_LONG:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(dec_insn.vB)) >>
+ shadow_frame.GetVReg(dec_insn.vC));
+ break;
+ case Instruction::ADD_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vB) +
+ shadow_frame.GetVRegFloat(dec_insn.vC));
+ break;
+ case Instruction::SUB_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vB) -
+ shadow_frame.GetVRegFloat(dec_insn.vC));
+ break;
+ case Instruction::MUL_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vB) *
+ shadow_frame.GetVRegFloat(dec_insn.vC));
+ break;
+ case Instruction::DIV_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vB) /
+ shadow_frame.GetVRegFloat(dec_insn.vC));
+ break;
+ case Instruction::REM_FLOAT:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ fmodf(shadow_frame.GetVRegFloat(dec_insn.vB),
+ shadow_frame.GetVRegFloat(dec_insn.vC)));
+ break;
+ case Instruction::ADD_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vB) +
+ shadow_frame.GetVRegDouble(dec_insn.vC));
+ break;
+ case Instruction::SUB_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vB) -
+ shadow_frame.GetVRegDouble(dec_insn.vC));
+ break;
+ case Instruction::MUL_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vB) *
+ shadow_frame.GetVRegDouble(dec_insn.vC));
+ break;
+ case Instruction::DIV_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vB) /
+ shadow_frame.GetVRegDouble(dec_insn.vC));
+ break;
+ case Instruction::REM_DOUBLE:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ fmod(shadow_frame.GetVRegDouble(dec_insn.vB),
+ shadow_frame.GetVRegDouble(dec_insn.vC)));
+ break;
+ case Instruction::ADD_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) + shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::SUB_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) - shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::MUL_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) * shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::REM_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) % shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::SHL_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) << shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::SHR_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) >> shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::USHR_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vA)) >>
+ shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::AND_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) & shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::OR_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) | shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::XOR_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) ^ shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::DIV_INT_2ADDR:
+ shadow_frame.SetVReg(dec_insn.vA,
+ shadow_frame.GetVReg(dec_insn.vA) / shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::ADD_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) +
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::SUB_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) -
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::MUL_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) *
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::DIV_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) /
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::REM_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) %
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::AND_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) &
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::OR_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) |
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::XOR_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) ^
+ shadow_frame.GetVRegLong(dec_insn.vB));
+ break;
+ case Instruction::SHL_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) <<
+ shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::SHR_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ shadow_frame.GetVRegLong(dec_insn.vA) >>
+ shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::USHR_LONG_2ADDR:
+ shadow_frame.SetVRegLong(dec_insn.vA,
+ static_cast<uint64_t>(shadow_frame.GetVRegLong(dec_insn.vA)) >>
+ shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::ADD_FLOAT_2ADDR:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vA) +
+ shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vA) -
+ shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vA) *
+ shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ shadow_frame.GetVRegFloat(dec_insn.vA) /
+ shadow_frame.GetVRegFloat(dec_insn.vB));
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ shadow_frame.SetVRegFloat(dec_insn.vA,
+ fmodf(shadow_frame.GetVRegFloat(dec_insn.vA),
+ shadow_frame.GetVRegFloat(dec_insn.vB)));
+ break;
+ case Instruction::ADD_DOUBLE_2ADDR:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vA) +
+ shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vA) -
+ shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vA) *
+ shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ shadow_frame.GetVRegDouble(dec_insn.vA) /
+ shadow_frame.GetVRegDouble(dec_insn.vB));
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ shadow_frame.SetVRegDouble(dec_insn.vA,
+ fmod(shadow_frame.GetVRegDouble(dec_insn.vA),
+ shadow_frame.GetVRegDouble(dec_insn.vB)));
+ break;
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) + dec_insn.vC);
+ break;
+ case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, dec_insn.vC - shadow_frame.GetVReg(dec_insn.vB));
+ break;
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::MUL_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) * dec_insn.vC);
+ break;
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) / dec_insn.vC);
+ break;
+ case Instruction::REM_INT_LIT16:
+ case Instruction::REM_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) % dec_insn.vC);
+ break;
+ case Instruction::AND_INT_LIT16:
+ case Instruction::AND_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) & dec_insn.vC);
+ break;
+ case Instruction::OR_INT_LIT16:
+ case Instruction::OR_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) | dec_insn.vC);
+ break;
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::XOR_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) ^ dec_insn.vC);
+ break;
+ case Instruction::SHL_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) << dec_insn.vC);
+ break;
+ case Instruction::SHR_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA, shadow_frame.GetVReg(dec_insn.vB) >> dec_insn.vC);
+ break;
+ case Instruction::USHR_INT_LIT8:
+ shadow_frame.SetVReg(dec_insn.vA,
+ static_cast<uint32_t>(shadow_frame.GetVReg(dec_insn.vB)) >>
+ dec_insn.vC);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected instruction: " << inst->DumpString(&mh.GetDexFile());
+ break;
+ }
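+ // If the instruction raised an exception, look for a catch handler in this method; if none
+ // exists, return to the caller, which continues the unwinding.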
+ if (UNLIKELY(self->IsExceptionPending())) {
+ uint32_t found_dex_pc =
+ shadow_frame.GetMethod()->FindCatchBlock(self->GetException()->GetClass(),
+ inst->GetDexPc(insns));
+ if (found_dex_pc == DexFile::kDexNoIndex) {
+ JValue result;
+ result.SetJ(0);
+ return result; // Handler in caller.
+ } else {
+ next_inst = Instruction::At(insns + found_dex_pc);
+ }
+ }
+ inst = next_inst;
+ }
+}
+
+void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* receiver,
+ JValue* args, JValue* result) {
+ MethodHelper mh(method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ uint16_t num_regs;
+ uint16_t num_ins;
+ if (code_item != NULL) {
+ num_regs = code_item->registers_size_;
+ num_ins = code_item->ins_size_;
+ } else {
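+ // Native methods have no code item; size the frame from the shorty instead.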
+ DCHECK(method->IsNative());
+ num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty());
+ if (!method->IsStatic()) {
+ num_regs++;
+ num_ins++;
+ }
+ }
+ // Set up shadow frame with matching number of reference slots to vregs.
+ ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
+ UniquePtr<ShadowFrame> shadow_frame(ShadowFrame::Create(num_regs, num_regs,
+ (last_shadow_frame == NULL) ? NULL : last_shadow_frame->GetLink(),
+ method, 0));
+ self->PushShadowFrame(shadow_frame.get());
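+ // Per the dex calling convention, incoming arguments occupy the last num_ins registers of the frame.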
+ size_t cur_reg = num_regs - num_ins;
+ if (!method->IsStatic()) {
+ CHECK(receiver != NULL);
+ shadow_frame->SetReferenceAndVReg(cur_reg, receiver);
+ ++cur_reg;
+ } else if (!method->GetDeclaringClass()->IsInitializing()) {
+ Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
+ true, true);
+ CHECK(method->GetDeclaringClass()->IsInitializing());
+ }
+ StringPiece shorty(mh.GetShorty());
+ size_t arg_pos = 0;
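+ // shorty[0] is the return type, so parameter types start at shorty[arg_pos + 1].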
+ for (; cur_reg < num_regs; ++cur_reg, ++arg_pos) {
+ DCHECK_LT(arg_pos + 1, mh.GetShortyLength());
+ switch (shorty[arg_pos + 1]) {
+ case 'L': {
+ Object* o = args[arg_pos].GetL();
+ shadow_frame->SetReferenceAndVReg(cur_reg, o);
+ break;
+ }
+ case 'J': case 'D':
+ shadow_frame->SetVRegLong(cur_reg, args[arg_pos].GetJ());
+ cur_reg++;
+ break;
+ default:
+ shadow_frame->SetVReg(cur_reg, args[arg_pos].GetI());
+ break;
+ }
+ }
+ if (!method->IsNative()) {
+ JValue r = Execute(self, mh, code_item, *shadow_frame.get());
+ if (result != NULL) {
+ *result = r;
+ }
+ } else {
+ // TODO: The following enters JNI code through a typedef'd function pointer rather than the JNI
+ // compiler; it should be removed once JNI-compiled stubs are used instead.
+ ScopedObjectAccessUnchecked soa(self);
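+ // Dispatch on the method's shorty to a matching function pointer signature; only the
+ // combinations needed so far are handled, anything else aborts below.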
+ if (method->IsStatic()) {
+ if (shorty == "L") {
+ typedef jobject (fnptr)(JNIEnv*, jclass);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetL(soa.Decode<Object*>(fn(soa.Env(), klass.get())));
+ } else if (shorty == "V") {
+ typedef void (fnptr)(JNIEnv*, jclass);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ fn(soa.Env(), klass.get());
+ } else if (shorty == "Z") {
+ typedef jboolean (fnptr)(JNIEnv*, jclass);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetZ(fn(soa.Env(), klass.get()));
+ } else if (shorty == "BI") {
+ typedef jbyte (fnptr)(JNIEnv*, jclass, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetB(fn(soa.Env(), klass.get(), args[0].GetI()));
+ } else if (shorty == "II") {
+ typedef jint (fnptr)(JNIEnv*, jclass, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetI(fn(soa.Env(), klass.get(), args[0].GetI()));
+ } else if (shorty == "LL") {
+ typedef jobject (fnptr)(JNIEnv*, jclass, jobject);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg0(soa.Env(),
+ soa.AddLocalReference<jobject>(args[0].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetL(soa.Decode<Object*>(fn(soa.Env(), klass.get(), arg0.get())));
+ } else if (shorty == "IIZ") {
+ typedef jint (fnptr)(JNIEnv*, jclass, jint, jboolean);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetI(fn(soa.Env(), klass.get(), args[0].GetI(), args[1].GetZ()));
+ } else if (shorty == "ILI") {
+ typedef jint (fnptr)(JNIEnv*, jclass, jobject, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg0(soa.Env(),
+ soa.AddLocalReference<jobject>(args[0].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1].GetI()));
+ } else if (shorty == "SIZ") {
+ typedef jshort (fnptr)(JNIEnv*, jclass, jint, jboolean);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetS(fn(soa.Env(), klass.get(), args[0].GetI(), args[1].GetZ()));
+ } else if (shorty == "VIZ") {
+ typedef void (fnptr)(JNIEnv*, jclass, jint, jboolean);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedThreadStateChange tsc(self, kNative);
+ fn(soa.Env(), klass.get(), args[0].GetI(), args[1].GetZ());
+ } else if (shorty == "ZLL") {
+ typedef jboolean (fnptr)(JNIEnv*, jclass, jobject, jobject);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg0(soa.Env(),
+ soa.AddLocalReference<jobject>(args[0].GetL()));
+ ScopedLocalRef<jobject> arg1(soa.Env(),
+ soa.AddLocalReference<jobject>(args[1].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
+ } else if (shorty == "ZILL") {
+ typedef jboolean (fnptr)(JNIEnv*, jclass, jint, jobject, jobject);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg1(soa.Env(),
+ soa.AddLocalReference<jobject>(args[1].GetL()));
+ ScopedLocalRef<jobject> arg2(soa.Env(),
+ soa.AddLocalReference<jobject>(args[2].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetZ(fn(soa.Env(), klass.get(), args[0].GetI(), arg1.get(), arg2.get()));
+ } else if (shorty == "VILII") {
+ typedef void (fnptr)(JNIEnv*, jclass, jint, jobject, jint, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg1(soa.Env(),
+ soa.AddLocalReference<jobject>(args[1].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ fn(soa.Env(), klass.get(), args[0].GetI(), arg1.get(), args[2].GetI(), args[3].GetI());
+ } else if (shorty == "VLILII") {
+ typedef void (fnptr)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jclass> klass(soa.Env(),
+ soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
+ ScopedLocalRef<jobject> arg0(soa.Env(),
+ soa.AddLocalReference<jobject>(args[0].GetL()));
+ ScopedLocalRef<jobject> arg2(soa.Env(),
+ soa.AddLocalReference<jobject>(args[2].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ fn(soa.Env(), klass.get(), arg0.get(), args[1].GetI(), arg2.get(), args[3].GetI(),
+ args[4].GetI());
+ } else {
+ LOG(FATAL) << "Do something with static native method: " << PrettyMethod(method)
+ << " shorty: " << shorty;
+ }
+ } else {
+ if (shorty == "L") {
+ typedef jobject (fnptr)(JNIEnv*, jobject);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jobject> rcvr(soa.Env(),
+ soa.AddLocalReference<jobject>(receiver));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetL(soa.Decode<Object*>(fn(soa.Env(), rcvr.get())));
+ } else if (shorty == "LL") {
+ typedef jobject (fnptr)(JNIEnv*, jobject, jobject);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jobject> rcvr(soa.Env(),
+ soa.AddLocalReference<jobject>(receiver));
+ ScopedLocalRef<jobject> arg0(soa.Env(),
+ soa.AddLocalReference<jobject>(args[0].GetL()));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetL(soa.Decode<Object*>(fn(soa.Env(), rcvr.get(), arg0.get())));
+ } else if (shorty == "III") {
+ typedef jint (fnptr)(JNIEnv*, jobject, jint, jint);
+ fnptr* fn = reinterpret_cast<fnptr*>(method->GetNativeMethod());
+ ScopedLocalRef<jobject> rcvr(soa.Env(),
+ soa.AddLocalReference<jobject>(receiver));
+ ScopedThreadStateChange tsc(self, kNative);
+ result->SetI(fn(soa.Env(), rcvr.get(), args[0].GetI(), args[1].GetI()));
+ } else {
+ LOG(FATAL) << "Do something with native method: " << PrettyMethod(method)
+ << " shorty: " << shorty;
+ }
+ }
+ }
+ self->PopShadowFrame();
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
new file mode 100644
index 0000000..ea07ce8
--- /dev/null
+++ b/src/interpreter/interpreter.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_INTERPRETER_INTERPRETER_H_
+#define ART_SRC_INTERPRETER_INTERPRETER_H_
+
+#include "locks.h"
+
+namespace art {
+
+class AbstractMethod;
+union JValue;
+class Object;
+class Thread;
+
+namespace interpreter {
+
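+// Runs the given method in the interpreter, building a shadow frame from |receiver| and |args|;
+// native methods are currently dispatched through a temporary hand-rolled JNI path.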
+extern void EnterInterpreterFromInvoke(Thread* self, AbstractMethod* method, Object* receiver,
+ JValue* args, JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_SRC_INTERPRETER_INTERPRETER_H_
diff --git a/src/invoke_arg_array_builder.h b/src/invoke_arg_array_builder.h
new file mode 100644
index 0000000..e965a1a
--- /dev/null
+++ b/src/invoke_arg_array_builder.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_
+#define ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_
+
+#include "object.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+static inline size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) {
+ size_t num_bytes = 0;
+ for (size_t i = 1; i < shorty_len; ++i) {
+ char ch = shorty[i];
+ if (ch == 'D' || ch == 'J') {
+ num_bytes += 8;
+ } else if (ch == 'L') {
+ // Argument is a reference or an array. The shorty descriptor
+ // does not distinguish between these types.
+ num_bytes += sizeof(Object*);
+ } else {
+ num_bytes += 4;
+ }
+ }
+ return num_bytes;
+}
+
+class ArgArray {
+ public:
+ explicit ArgArray(const char* shorty, uint32_t shorty_len)
+ : shorty_(shorty), shorty_len_(shorty_len) {
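+ // Use the fixed-size inline buffer for typical argument counts; fall back to the heap otherwise.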
+ if (shorty_len - 1 < kSmallArgArraySize) {
+ arg_array_ = small_arg_array_;
+ } else {
+ large_arg_array_.reset(new JValue[shorty_len_ - 1]);
+ arg_array_ = large_arg_array_.get();
+ }
+ }
+
+ JValue* get() {
+ return arg_array_;
+ }
+
+ void BuildArgArray(const ScopedObjectAccess& soa, va_list ap)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
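+ // Note: C varargs default promotions mean sub-int types arrive as jint and float arrives as jdouble.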
+ for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) {
+ switch (shorty_[i]) {
+ case 'Z':
+ arg_array_[offset].SetZ(va_arg(ap, jint));
+ break;
+ case 'B':
+ arg_array_[offset].SetB(va_arg(ap, jint));
+ break;
+ case 'C':
+ arg_array_[offset].SetC(va_arg(ap, jint));
+ break;
+ case 'S':
+ arg_array_[offset].SetS(va_arg(ap, jint));
+ break;
+ case 'I':
+ arg_array_[offset].SetI(va_arg(ap, jint));
+ break;
+ case 'F':
+ arg_array_[offset].SetF(va_arg(ap, jdouble));
+ break;
+ case 'L':
+ arg_array_[offset].SetL(soa.Decode<Object*>(va_arg(ap, jobject)));
+ break;
+ case 'D':
+ arg_array_[offset].SetD(va_arg(ap, jdouble));
+ break;
+ case 'J':
+ arg_array_[offset].SetJ(va_arg(ap, jlong));
+ break;
+ }
+ }
+ }
+
+ void BuildArgArray(const ScopedObjectAccess& soa, jvalue* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) {
+ switch (shorty_[i]) {
+ case 'Z':
+ arg_array_[offset].SetZ(args[offset].z);
+ break;
+ case 'B':
+ arg_array_[offset].SetB(args[offset].b);
+ break;
+ case 'C':
+ arg_array_[offset].SetC(args[offset].c);
+ break;
+ case 'S':
+ arg_array_[offset].SetS(args[offset].s);
+ break;
+ case 'I':
+ arg_array_[offset].SetI(args[offset].i);
+ break;
+ case 'F':
+ arg_array_[offset].SetF(args[offset].f);
+ break;
+ case 'L':
+ arg_array_[offset].SetL(soa.Decode<Object*>(args[offset].l));
+ break;
+ case 'D':
+ arg_array_[offset].SetD(args[offset].d);
+ break;
+ case 'J':
+ arg_array_[offset].SetJ(args[offset].j);
+ break;
+ }
+ }
+ }
+
+ void BuildArgArray(const ShadowFrame& shadow_frame, uint32_t range_start)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
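+ // Wide values ('D' and 'J') occupy two vregs, hence the extra offset increment in those cases.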
+ for (size_t i = 1, offset = range_start; i < shorty_len_; ++i, ++offset) {
+ switch (shorty_[i]) {
+ case 'Z':
+ arg_array_[offset].SetZ(shadow_frame.GetVReg(offset));
+ break;
+ case 'B':
+ arg_array_[offset].SetB(shadow_frame.GetVReg(offset));
+ break;
+ case 'C':
+ arg_array_[offset].SetC(shadow_frame.GetVReg(offset));
+ break;
+ case 'S':
+ arg_array_[offset].SetS(shadow_frame.GetVReg(offset));
+ break;
+ case 'I':
+ arg_array_[offset].SetI(shadow_frame.GetVReg(offset));
+ break;
+ case 'F':
+ arg_array_[offset].SetF(shadow_frame.GetVRegFloat(offset));
+ break;
+ case 'L':
+ arg_array_[offset].SetL(shadow_frame.GetReference(offset));
+ break;
+ case 'D':
+ arg_array_[offset].SetD(shadow_frame.GetVRegDouble(offset));
+ offset++;
+ break;
+ case 'J':
+ arg_array_[offset].SetJ(shadow_frame.GetVRegLong(offset));
+ offset++;
+ break;
+ }
+ }
+ }
+
+ void BuildArgArray(const ShadowFrame& shadow_frame, const uint32_t* arg_regs)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) {
+ switch (shorty_[i]) {
+ case 'Z':
+ arg_array_[offset].SetZ(shadow_frame.GetVReg(arg_regs[offset]));
+ break;
+ case 'B':
+ arg_array_[offset].SetB(shadow_frame.GetVReg(arg_regs[offset]));
+ break;
+ case 'C':
+ arg_array_[offset].SetC(shadow_frame.GetVReg(arg_regs[offset]));
+ break;
+ case 'S':
+ arg_array_[offset].SetS(shadow_frame.GetVReg(arg_regs[offset]));
+ break;
+ case 'I':
+ arg_array_[offset].SetI(shadow_frame.GetVReg(arg_regs[offset]));
+ break;
+ case 'F':
+ arg_array_[offset].SetF(shadow_frame.GetVRegFloat(arg_regs[offset]));
+ break;
+ case 'L':
+ arg_array_[offset].SetL(shadow_frame.GetReference(arg_regs[offset]));
+ break;
+ case 'D':
+ arg_array_[offset].SetD(shadow_frame.GetVRegDouble(arg_regs[offset]));
+ offset++;
+ break;
+ case 'J':
+ arg_array_[offset].SetJ(shadow_frame.GetVRegLong(arg_regs[offset]));
+ offset++;
+ break;
+ }
+ }
+ }
+
+ private:
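+ // Most methods take only a handful of arguments, so a small inline buffer avoids heap allocation.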
+ enum { kSmallArgArraySize = 16 };
+ const char* const shorty_;
+ const uint32_t shorty_len_;
+ JValue* arg_array_;
+ JValue small_arg_array_[kSmallArgArraySize];
+ UniquePtr<JValue[]> large_arg_array_;
+};
+
+} // namespace art
+
+#endif // ART_SRC_INVOKE_ARG_ARRAY_BUILDER_H_
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index 60b49de..097d587 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -24,6 +24,7 @@
#include "class_linker.h"
#include "class_loader.h"
+#include "invoke_arg_array_builder.h"
#include "jni.h"
#include "logging.h"
#include "mutex.h"
@@ -74,120 +75,6 @@
}
}
-size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len) {
- size_t num_bytes = 0;
- for (size_t i = 1; i < shorty_len; ++i) {
- char ch = shorty[i];
- if (ch == 'D' || ch == 'J') {
- num_bytes += 8;
- } else if (ch == 'L') {
- // Argument is a reference or an array. The shorty descriptor
- // does not distinguish between these types.
- num_bytes += sizeof(Object*);
- } else {
- num_bytes += 4;
- }
- }
- return num_bytes;
-}
-
-class ArgArray {
- public:
- explicit ArgArray(AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- MethodHelper mh(method);
- shorty_ = mh.GetShorty();
- shorty_len_ = mh.GetShortyLength();
- if (shorty_len_ - 1 < kSmallArgArraySize) {
- arg_array_ = small_arg_array_;
- } else {
- large_arg_array_.reset(new JValue[shorty_len_ - 1]);
- arg_array_ = large_arg_array_.get();
- }
- }
-
- JValue* get() {
- return arg_array_;
- }
-
- void BuildArgArray(const ScopedObjectAccess& soa, va_list ap)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) {
- switch (shorty_[i]) {
- case 'Z':
- arg_array_[offset].SetZ(va_arg(ap, jint));
- break;
- case 'B':
- arg_array_[offset].SetB(va_arg(ap, jint));
- break;
- case 'C':
- arg_array_[offset].SetC(va_arg(ap, jint));
- break;
- case 'S':
- arg_array_[offset].SetS(va_arg(ap, jint));
- break;
- case 'I':
- arg_array_[offset].SetI(va_arg(ap, jint));
- break;
- case 'F':
- arg_array_[offset].SetF(va_arg(ap, jdouble));
- break;
- case 'L':
- arg_array_[offset].SetL(soa.Decode<Object*>(va_arg(ap, jobject)));
- break;
- case 'D':
- arg_array_[offset].SetD(va_arg(ap, jdouble));
- break;
- case 'J':
- arg_array_[offset].SetJ(va_arg(ap, jlong));
- break;
- }
- }
- }
-
- void BuildArgArray(const ScopedObjectAccess& soa, jvalue* args)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (size_t i = 1, offset = 0; i < shorty_len_; ++i, ++offset) {
- switch (shorty_[i]) {
- case 'Z':
- arg_array_[offset].SetZ(args[offset].z);
- break;
- case 'B':
- arg_array_[offset].SetB(args[offset].b);
- break;
- case 'C':
- arg_array_[offset].SetC(args[offset].c);
- break;
- case 'S':
- arg_array_[offset].SetS(args[offset].s);
- break;
- case 'I':
- arg_array_[offset].SetI(args[offset].i);
- break;
- case 'F':
- arg_array_[offset].SetF(args[offset].f);
- break;
- case 'L':
- arg_array_[offset].SetL(soa.Decode<Object*>(args[offset].l));
- break;
- case 'D':
- arg_array_[offset].SetD(args[offset].d);
- break;
- case 'J':
- arg_array_[offset].SetJ(args[offset].j);
- break;
- }
- }
- }
-
- private:
- enum { kSmallArgArraySize = 16 };
- const char* shorty_;
- uint32_t shorty_len_;
- JValue* arg_array_;
- JValue small_arg_array_[kSmallArgArraySize];
- UniquePtr<JValue[]> large_arg_array_;
-};
-
static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (obj == NULL) {
@@ -253,7 +140,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Object* receiver = soa.Decode<Object*>(obj);
AbstractMethod* method = soa.DecodeMethod(mid);
- ArgArray arg_array(method);
+ MethodHelper mh(method);
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArray(soa, args);
return InvokeWithArgArray(soa, receiver, method, arg_array.get());
}
@@ -268,7 +156,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Object* receiver = soa.Decode<Object*>(obj);
AbstractMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
- ArgArray arg_array(method);
+ MethodHelper mh(method);
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArray(soa, args);
return InvokeWithArgArray(soa, receiver, method, arg_array.get());
}
@@ -278,7 +167,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Object* receiver = soa.Decode<Object*>(obj);
AbstractMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
- ArgArray arg_array(method);
+ MethodHelper mh(method);
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArray(soa, args);
return InvokeWithArgArray(soa, receiver, method, arg_array.get());
}
@@ -670,7 +560,8 @@
jvalue* args) {
Object* receiver = soa.Decode<Object*>(obj);
AbstractMethod* method = soa.DecodeMethod(mid);
- ArgArray arg_array(method);
+ MethodHelper mh(method);
+ ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
arg_array.BuildArgArray(soa, args);
return InvokeWithArgArray(soa, receiver, method, arg_array.get());
}
@@ -1415,7 +1306,7 @@
static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
ScopedObjectAccess soa(env);
Field* f = soa.DecodeField(fid);
- return soa.AddLocalReference<jobject>(f->GetObject(NULL));
+ return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
}
static void SetObjectField(JNIEnv* env, jobject java_object, jfieldID fid, jobject java_value) {
@@ -1430,7 +1321,7 @@
ScopedObjectAccess soa(env);
Object* v = soa.Decode<Object*>(java_value);
Field* f = soa.DecodeField(fid);
- f->SetObject(NULL, v);
+ f->SetObject(f->GetDeclaringClass(), v);
}
#define GET_PRIMITIVE_FIELD(fn, instance) \
@@ -1439,12 +1330,22 @@
Field* f = soa.DecodeField(fid); \
return f->fn(o)
+#define GET_STATIC_PRIMITIVE_FIELD(fn) \
+ ScopedObjectAccess soa(env); \
+ Field* f = soa.DecodeField(fid); \
+ return f->fn(f->GetDeclaringClass())
+
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
ScopedObjectAccess soa(env); \
Object* o = soa.Decode<Object*>(instance); \
Field* f = soa.DecodeField(fid); \
f->fn(o, value)
+#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
+ ScopedObjectAccess soa(env); \
+ Field* f = soa.DecodeField(fid); \
+ f->fn(f->GetDeclaringClass(), value)
+
static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
GET_PRIMITIVE_FIELD(GetBoolean, obj);
}
@@ -1478,35 +1379,35 @@
}
static jboolean GetStaticBooleanField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetBoolean, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetBoolean);
}
static jbyte GetStaticByteField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetByte, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetByte);
}
static jchar GetStaticCharField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetChar, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetChar);
}
static jshort GetStaticShortField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetShort, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetShort);
}
static jint GetStaticIntField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetInt, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetInt);
}
static jlong GetStaticLongField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetLong, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetLong);
}
static jfloat GetStaticFloatField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetFloat, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetFloat);
}
static jdouble GetStaticDoubleField(JNIEnv* env, jclass, jfieldID fid) {
- GET_PRIMITIVE_FIELD(GetDouble, NULL);
+ GET_STATIC_PRIMITIVE_FIELD(GetDouble);
}
static void SetBooleanField(JNIEnv* env, jobject obj, jfieldID fid, jboolean v) {
@@ -1542,35 +1443,35 @@
}
static void SetStaticBooleanField(JNIEnv* env, jclass, jfieldID fid, jboolean v) {
- SET_PRIMITIVE_FIELD(SetBoolean, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetBoolean, v);
}
static void SetStaticByteField(JNIEnv* env, jclass, jfieldID fid, jbyte v) {
- SET_PRIMITIVE_FIELD(SetByte, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetByte, v);
}
static void SetStaticCharField(JNIEnv* env, jclass, jfieldID fid, jchar v) {
- SET_PRIMITIVE_FIELD(SetChar, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetChar, v);
}
static void SetStaticFloatField(JNIEnv* env, jclass, jfieldID fid, jfloat v) {
- SET_PRIMITIVE_FIELD(SetFloat, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetFloat, v);
}
static void SetStaticDoubleField(JNIEnv* env, jclass, jfieldID fid, jdouble v) {
- SET_PRIMITIVE_FIELD(SetDouble, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetDouble, v);
}
static void SetStaticIntField(JNIEnv* env, jclass, jfieldID fid, jint v) {
- SET_PRIMITIVE_FIELD(SetInt, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetInt, v);
}
static void SetStaticLongField(JNIEnv* env, jclass, jfieldID fid, jlong v) {
- SET_PRIMITIVE_FIELD(SetLong, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetLong, v);
}
static void SetStaticShortField(JNIEnv* env, jclass, jfieldID fid, jshort v) {
- SET_PRIMITIVE_FIELD(SetShort, NULL, v);
+ SET_STATIC_PRIMITIVE_FIELD(SetShort, v);
}
static jobject CallStaticObjectMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
diff --git a/src/jni_internal.h b/src/jni_internal.h
index c683464..95bc281 100644
--- a/src/jni_internal.h
+++ b/src/jni_internal.h
@@ -52,7 +52,6 @@
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
size_t method_count);
-size_t NumArgArrayBytes(const char* shorty, uint32_t shorty_len);
JValue InvokeWithJValues(const ScopedObjectAccess&, jobject obj, jmethodID mid, jvalue* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeWithJValues(const ScopedObjectAccess&, Object* receiver, AbstractMethod* m, JValue* args)
diff --git a/src/modifiers.h b/src/modifiers.h
index 070130f..ee2d4ff 100644
--- a/src/modifiers.h
+++ b/src/modifiers.h
@@ -43,6 +43,8 @@
static const uint32_t kAccConstructor = 0x00010000; // method (dex only)
static const uint32_t kAccDeclaredSynchronized = 0x00020000; // method (dex only)
static const uint32_t kAccClassIsProxy = 0x00040000; // class (dex only)
+// TODO: JACK CLASS ACCESS (HACK TO BE REMOVED)
+static const uint32_t kAccClassJack = 0x00080000; // class (dex only)
// Special runtime-only flags.
// Note: if only kAccClassIsReference is set, we have a soft reference.
diff --git a/src/native/java_lang_reflect_Field.cc b/src/native/java_lang_reflect_Field.cc
index c82e503..d99ccb3 100644
--- a/src/native/java_lang_reflect_Field.cc
+++ b/src/native/java_lang_reflect_Field.cc
@@ -76,7 +76,7 @@
Object*& o)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (f->IsStatic()) {
- o = NULL;
+ o = f->GetDeclaringClass();
return true;
}
diff --git a/src/oat/jni/arm/jni_internal_arm.cc b/src/oat/jni/arm/jni_internal_arm.cc
index 61f29af..48d649d 100644
--- a/src/oat/jni/arm/jni_internal_arm.cc
+++ b/src/oat/jni/arm/jni_internal_arm.cc
@@ -21,6 +21,7 @@
#include "asm_support.h"
#include "compiled_method.h"
#include "compiler.h"
+#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "oat/utils/arm/assembler_arm.h"
#include "oat/utils/assembler.h"
diff --git a/src/oat/jni/mips/jni_internal_mips.cc b/src/oat/jni/mips/jni_internal_mips.cc
index a1fc0bf..dd66be9 100644
--- a/src/oat/jni/mips/jni_internal_mips.cc
+++ b/src/oat/jni/mips/jni_internal_mips.cc
@@ -21,6 +21,7 @@
#include "asm_support.h"
#include "compiled_method.h"
#include "compiler.h"
+#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "oat/utils/mips/assembler_mips.h"
#include "oat/utils/assembler.h"
diff --git a/src/oat/jni/x86/jni_internal_x86.cc b/src/oat/jni/x86/jni_internal_x86.cc
index c34112b..ca4a6ab 100644
--- a/src/oat/jni/x86/jni_internal_x86.cc
+++ b/src/oat/jni/x86/jni_internal_x86.cc
@@ -16,6 +16,7 @@
#include "compiled_method.h"
#include "compiler.h"
+#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "oat/utils/assembler.h"
#include "oat/utils/x86/assembler_x86.h"
diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc
index 16eddc4..0db743b 100644
--- a/src/oat/runtime/support_cast.cc
+++ b/src/oat/runtime/support_cast.cc
@@ -37,7 +37,7 @@
return 0; // Success
} else {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
- Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassCastException;",
+ self->ThrowNewExceptionF("Ljava/lang/ClassCastException;",
"%s cannot be cast to %s",
PrettyDescriptor(a).c_str(),
PrettyDescriptor(b).c_str());
@@ -58,7 +58,7 @@
return 0; // Success
} else {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
- Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str());
diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc
index 53b53b2..9336247 100644
--- a/src/oat/runtime/support_field.cc
+++ b/src/oat/runtime/support_field.cc
@@ -26,12 +26,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != NULL)) {
- return field->Get32(NULL);
+ return field->Get32(field->GetDeclaringClass());
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != NULL)) {
- return field->Get32(NULL);
+ return field->Get32(field->GetDeclaringClass());
}
return 0; // Will throw exception by checking with Thread::Current
}
@@ -41,12 +41,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != NULL)) {
- return field->Get64(NULL);
+ return field->Get64(field->GetDeclaringClass());
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != NULL)) {
- return field->Get64(NULL);
+ return field->Get64(field->GetDeclaringClass());
}
return 0; // Will throw exception by checking with Thread::Current
}
@@ -56,12 +56,12 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
if (LIKELY(field != NULL)) {
- return field->GetObj(NULL);
+ return field->GetObj(field->GetDeclaringClass());
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticObjectRead, sizeof(Object*));
if (LIKELY(field != NULL)) {
- return field->GetObj(NULL);
+ return field->GetObj(field->GetDeclaringClass());
}
return NULL; // Will throw exception by checking with Thread::Current
}
@@ -132,13 +132,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != NULL)) {
- field->Set32(NULL, new_value);
+ field->Set32(field->GetDeclaringClass(), new_value);
return 0; // success
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != NULL)) {
- field->Set32(NULL, new_value);
+ field->Set32(field->GetDeclaringClass(), new_value);
return 0; // success
}
return -1; // failure
@@ -149,13 +149,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != NULL)) {
- field->Set64(NULL, new_value);
+ field->Set64(field->GetDeclaringClass(), new_value);
return 0; // success
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != NULL)) {
- field->Set64(NULL, new_value);
+ field->Set64(field->GetDeclaringClass(), new_value);
return 0; // success
}
return -1; // failure
@@ -168,14 +168,14 @@
Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
if (LIKELY(field != NULL)) {
if (LIKELY(!FieldHelper(field).IsPrimitiveType())) {
- field->SetObj(NULL, new_value);
+ field->SetObj(field->GetDeclaringClass(), new_value);
return 0; // success
}
}
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
field = FindFieldFromCode(field_idx, referrer, self, StaticObjectWrite, sizeof(Object*));
if (LIKELY(field != NULL)) {
- field->SetObj(NULL, new_value);
+ field->SetObj(field->GetDeclaringClass(), new_value);
return 0; // success
}
return -1; // failure
diff --git a/src/oat_file.cc b/src/oat_file.cc
index 4e05325..145d2e2 100644
--- a/src/oat_file.cc
+++ b/src/oat_file.cc
@@ -70,11 +70,7 @@
bool OatFile::Map(File& file,
byte* requested_base,
-#if defined(ART_USE_LLVM_COMPILER)
- RelocationBehavior reloc,
-#else
RelocationBehavior /*UNUSED*/,
-#endif
bool writable) {
OatHeader oat_header;
bool success = file.ReadFully(&oat_header, sizeof(oat_header));
diff --git a/src/oat_test.cc b/src/oat_test.cc
index 64c502d..bb6305a 100644
--- a/src/oat_test.cc
+++ b/src/oat_test.cc
@@ -65,7 +65,15 @@
jobject class_loader = NULL;
if (compile) {
- compiler_.reset(new Compiler(kThumb2, false, 2, false, NULL, true, true));
+ // TODO: make selectable
+#if defined(ART_USE_PORTABLE_COMPILER)
+ CompilerBackend compiler_backend = kPortable;
+#elif defined(ART_USE_LLVM_COMPILER)
+ CompilerBackend compiler_backend = kIceland; // TODO: remove
+#else
+ CompilerBackend compiler_backend = kQuick;
+#endif
+ compiler_.reset(new Compiler(compiler_backend, kThumb2, false, 2, false, NULL, true, true));
compiler_->CompileAll(class_loader, class_linker->GetBootClassPath());
}
diff --git a/src/oatdump.cc b/src/oatdump.cc
index 4231ecf..0db71c9 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -247,7 +247,10 @@
UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file.GetOatClass(class_def_index));
CHECK(oat_class.get() != NULL);
os << StringPrintf("%zd: %s (type_idx=%d) (", class_def_index, descriptor, class_def.class_idx_)
- << oat_class->GetStatus() << ")\n";
+ << oat_class->GetStatus() << ")"
+ // TODO: JACK CLASS ACCESS (HACK TO BE REMOVED)
+ << ((class_def.access_flags_ & kAccClassJack) == kAccClassJack ? " (Jack)" : "")
+ << "\n";
DumpOatClass(os, *oat_class.get(), *(dex_file.get()), class_def);
}
diff --git a/src/object.cc b/src/object.cc
index 5fdea71..9189f03 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -29,6 +29,7 @@
#include "dex_file.h"
#include "globals.h"
#include "heap.h"
+#include "interpreter/interpreter.h"
#include "intern_table.h"
#include "logging.h"
#include "monitor.h"
@@ -42,11 +43,52 @@
namespace art {
+BooleanArray* Object::AsBooleanArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveBoolean());
+ return down_cast<BooleanArray*>(this);
+}
+
+ByteArray* Object::AsByteArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte());
+ return down_cast<ByteArray*>(this);
+}
+
+CharArray* Object::AsCharArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveChar());
+ return down_cast<CharArray*>(this);
+}
+
+ShortArray* Object::AsShortArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort());
+ return down_cast<ShortArray*>(this);
+}
+
+IntArray* Object::AsIntArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveInt());
+ return down_cast<IntArray*>(this);
+}
+
+LongArray* Object::AsLongArray() {
+ DCHECK(GetClass()->IsArrayClass());
+ DCHECK(GetClass()->GetComponentType()->IsPrimitiveLong());
+ return down_cast<LongArray*>(this);
+}
+
String* Object::AsString() {
DCHECK(GetClass()->IsStringClass());
return down_cast<String*>(this);
}
+Throwable* Object::AsThrowable() {
+ DCHECK(GetClass()->IsThrowableClass());
+ return down_cast<Throwable*>(this);
+}
+
Object* Object::Clone(Thread* self) {
Class* c = GetClass();
DCHECK(!c->IsClassClass());
@@ -188,50 +230,38 @@
}
uint32_t Field::Get32(const Object* object) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
return object->GetField32(GetOffset(), IsVolatile());
}
void Field::Set32(Object* object, uint32_t new_value) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
object->SetField32(GetOffset(), new_value, IsVolatile());
}
uint64_t Field::Get64(const Object* object) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
return object->GetField64(GetOffset(), IsVolatile());
}
void Field::Set64(Object* object, uint64_t new_value) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
object->SetField64(GetOffset(), new_value, IsVolatile());
}
Object* Field::GetObj(const Object* object) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
return object->GetFieldObject<Object*>(GetOffset(), IsVolatile());
}
void Field::SetObj(Object* object, const Object* new_value) const {
- CHECK((object == NULL) == IsStatic()) << PrettyField(this);
- if (IsStatic()) {
- object = declaring_class_;
- }
+ DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(IsStatic() == (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
object->SetFieldObject(GetOffset(), new_value, IsVolatile());
}
@@ -603,7 +633,7 @@
return DexFile::kDexNoIndex;
}
-void AbstractMethod::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const {
+void AbstractMethod::Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) {
if (kIsDebugBuild) {
self->AssertThreadSuspensionIsAllowable();
CHECK_EQ(kRunnable, self->GetState());
@@ -617,26 +647,35 @@
// Pass everything as arguments.
AbstractMethod::InvokeStub* stub = GetInvokeStub();
- bool have_executable_code = (GetCode() != NULL);
- if (Runtime::Current()->IsStarted() && have_executable_code && stub != NULL) {
- bool log = false;
- if (log) {
- LOG(INFO) << StringPrintf("invoking %s code=%p stub=%p",
- PrettyMethod(this).c_str(), GetCode(), stub);
- }
- (*stub)(this, receiver, self, args, result);
- if (log) {
- LOG(INFO) << StringPrintf("returned %s code=%p stub=%p",
- PrettyMethod(this).c_str(), GetCode(), stub);
- }
- } else {
- LOG(INFO) << StringPrintf("not invoking %s code=%p stub=%p started=%s",
- PrettyMethod(this).c_str(), GetCode(), stub,
- Runtime::Current()->IsStarted() ? "true" : "false");
+ if (UNLIKELY(!Runtime::Current()->IsStarted())) {
+ LOG(INFO) << "Not invoking " << PrettyMethod(this) << " for a runtime that isn't started.";
if (result != NULL) {
result->SetJ(0);
}
+ } else {
+ if (GetCode() != NULL && stub != NULL) {
+ bool log = false;
+ if (log) {
+ LOG(INFO) << StringPrintf("invoking %s code=%p stub=%p",
+ PrettyMethod(this).c_str(), GetCode(), stub);
+ }
+ (*stub)(this, receiver, self, args, result);
+ if (log) {
+ LOG(INFO) << StringPrintf("returned %s code=%p stub=%p",
+ PrettyMethod(this).c_str(), GetCode(), stub);
+ }
+ } else {
+ LOG(INFO) << "Not invoking " << PrettyMethod(this)
+ << " code=" << reinterpret_cast<const void*>(GetCode())
+ << " stub=" << reinterpret_cast<void*>(stub);
+ const bool kInterpretMethodsWithNoCode = false;
+ if (kInterpretMethodsWithNoCode) {
+ art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args, result);
+ } else if (result != NULL) {
+ result->SetJ(0);
+ }
+ }
}
// Pop transition.
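
The Field accessor changes above replace the old "NULL receiver means static" convention: static
fields are now read and written through their declaring class, and the CHECKs become DCHECKs that
only require the receiver to be non-NULL (the object_test.cc hunk below is updated accordingly).
A minimal sketch of the new calling convention, built only from accessors that appear in this
patch; the helper itself is hypothetical:

  // Hypothetical helper: pick the receiver the new convention expects.
  uint32_t ReadInt32Field(Field* field, Object* instance) {
    Object* receiver = field->IsStatic() ? field->GetDeclaringClass() : instance;
    return field->Get32(receiver);  // Get32() now DCHECKs that receiver != NULL.
  }
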
diff --git a/src/object.h b/src/object.h
index 43aed33..61fa335 100644
--- a/src/object.h
+++ b/src/object.h
@@ -247,8 +247,17 @@
return down_cast<const Array*>(this);
}
+ BooleanArray* AsBooleanArray();
+ ByteArray* AsByteArray();
+ CharArray* AsCharArray();
+ ShortArray* AsShortArray();
+ IntArray* AsIntArray();
+ LongArray* AsLongArray();
+
String* AsString();
+ Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool IsMethod() const;
AbstractMethod* AsMethod() {
@@ -668,7 +677,7 @@
// Find the method that this method overrides
AbstractMethod* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result) const
+ void Invoke(Thread* self, Object* receiver, JValue* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const void* GetCode() const {
@@ -1749,6 +1758,14 @@
return GetVTable()->Get(method->GetMethodIndex());
}
+ // Given a method implemented by this class' super class, return the specific implementation
+ // method for this class.
+ AbstractMethod* FindVirtualMethodForSuper(AbstractMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(!method->GetDeclaringClass()->IsInterface());
+ return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex());
+ }
+
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
@@ -2358,7 +2375,6 @@
}
void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: ArrayStoreException
if (IsValidIndex(i)) {
GetData()[i] = value;
}
@@ -2397,10 +2413,7 @@
}
const CharArray* GetCharArray() const {
- const CharArray* result = GetFieldObject<const CharArray*>(
- ValueOffset(), false);
- DCHECK(result != NULL);
- return result;
+ return GetFieldObject<const CharArray*>(ValueOffset(), false);
}
int32_t GetOffset() const {
diff --git a/src/object_test.cc b/src/object_test.cc
index 9ad0534..e0443d0 100644
--- a/src/object_test.cc
+++ b/src/object_test.cc
@@ -238,15 +238,15 @@
Field* field = FindFieldFromCode(field_idx, clinit, Thread::Current(), StaticObjectRead,
sizeof(Object*));
- Object* s0 = field->GetObj(NULL);
+ Object* s0 = field->GetObj(klass);
EXPECT_EQ(NULL, s0);
SirtRef<CharArray> char_array(soa.Self(), CharArray::Alloc(soa.Self(), 0));
- field->SetObj(NULL, char_array.get());
- EXPECT_EQ(char_array.get(), field->GetObj(NULL));
+ field->SetObj(field->GetDeclaringClass(), char_array.get());
+ EXPECT_EQ(char_array.get(), field->GetObj(klass));
- field->SetObj(NULL, NULL);
- EXPECT_EQ(NULL, field->GetObj(NULL));
+ field->SetObj(field->GetDeclaringClass(), NULL);
+ EXPECT_EQ(NULL, field->GetObj(klass));
// TODO: more exhaustive tests of all 6 cases of Field::*FromCode
}
diff --git a/src/object_utils.h b/src/object_utils.h
index c6e71c3..661773a 100644
--- a/src/object_utils.h
+++ b/src/object_utils.h
@@ -647,7 +647,7 @@
Class* GetDexCacheResolvedType(uint16_t type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetDexCache()->GetResolvedType(type_idx);
+ return method_->GetDexCacheResolvedTypes()->Get(type_idx);
}
const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -669,11 +669,26 @@
return result;
}
+ String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ String* s = method_->GetDexCacheStrings()->Get(string_idx);
+ if (UNLIKELY(s == NULL)) {
+ s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, GetDexCache());
+ }
+ return s;
+ }
+
+ Class* ResolveClass(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* c = GetDexCacheResolvedType(type_idx);
+ if (UNLIKELY(c == NULL)) {
+ c = GetClassLinker()->ResolveType(GetDexFile(), type_idx, GetDexCache(), GetClassLoader());
+ }
+ return c;
+ }
+
private:
// Set the method_ field, for proxy methods looking up the interface method via the resolved
// methods table.
- void SetMethod(const AbstractMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void SetMethod(const AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method != NULL) {
Class* klass = method->GetDeclaringClass();
if (klass->IsProxyClass()) {
diff --git a/src/runtime.cc b/src/runtime.cc
index 7bc1b70..79d1fb2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -191,7 +191,8 @@
if (!tll_already_held || !ml_already_held) {
os << "Dumping all threads without appropriate locks held:"
<< (!tll_already_held ? " thread list lock" : "")
- << (!ml_already_held ? " mutator lock" : "");
+ << (!ml_already_held ? " mutator lock" : "")
+ << "\n";
}
os << "All threads:\n";
Runtime::Current()->GetThreadList()->DumpLocked(os);
@@ -717,8 +718,12 @@
CHECK_EQ(self->GetState(), kNative);
JNIEnv* env = self->GetJniEnv();
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_start);
- CHECK(!env->ExceptionCheck());
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
+ WellKnownClasses::java_lang_Daemons_start);
+ if (env->ExceptionCheck()) {
+ env->ExceptionDescribe();
+ LOG(FATAL) << "Error starting java.lang.Daemons";
+ }
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
diff --git a/src/stack.h b/src/stack.h
index 845b840..3cf8577 100644
--- a/src/stack.h
+++ b/src/stack.h
@@ -37,6 +37,14 @@
class ShadowFrame {
public:
+ static ShadowFrame* Create(uint16_t num_refs, uint16_t num_vregs, ShadowFrame* link,
+ AbstractMethod* method, uint32_t dex_pc) {
+ size_t sz = sizeof(ShadowFrame) + (sizeof(Object*) * num_refs) + (sizeof(uint32_t) * num_vregs);
+ uint8_t* memory = new uint8_t[sz];
+ return new (memory) ShadowFrame(num_refs, num_vregs, link, method, dex_pc);
+ }
+ ~ShadowFrame() {}
+
uint32_t NumberOfReferences() const {
return number_of_references_;
}
@@ -76,6 +84,59 @@
references_[i] = object;
}
+ int32_t GetVReg(size_t i) const {
+ DCHECK_LT(i, number_of_vregs_);
+ const int8_t* vregs = reinterpret_cast<const int8_t*>(this) + VRegsOffset();
+ return reinterpret_cast<const int32_t*>(vregs)[i];
+ }
+
+ float GetVRegFloat(size_t i) const {
+ DCHECK_LT(i, number_of_vregs_);
+ const int8_t* vregs = reinterpret_cast<const int8_t*>(this) + VRegsOffset();
+ return reinterpret_cast<const float*>(vregs)[i];
+ }
+
+ int64_t GetVRegLong(size_t i) const {
+ const int8_t* vregs = reinterpret_cast<const int8_t*>(this) + VRegsOffset();
+ const int32_t* low_half = &reinterpret_cast<const int32_t*>(vregs)[i];
+ return *reinterpret_cast<const int64_t*>(low_half);
+ }
+
+ double GetVRegDouble(size_t i) const {
+ const int8_t* vregs = reinterpret_cast<const int8_t*>(this) + VRegsOffset();
+ const int32_t* low_half = &reinterpret_cast<const int32_t*>(vregs)[i];
+ return *reinterpret_cast<const double*>(low_half);
+ }
+
+ void SetVReg(size_t i, int32_t val) {
+ DCHECK_LT(i, number_of_vregs_);
+ int8_t* vregs = reinterpret_cast<int8_t*>(this) + VRegsOffset();
+ reinterpret_cast<int32_t*>(vregs)[i] = val;
+ }
+
+ void SetVRegFloat(size_t i, float val) {
+ DCHECK_LT(i, number_of_vregs_);
+ int8_t* vregs = reinterpret_cast<int8_t*>(this) + VRegsOffset();
+ reinterpret_cast<float*>(vregs)[i] = val;
+ }
+
+ void SetVRegLong(size_t i, int64_t val) {
+ int8_t* vregs = reinterpret_cast<int8_t*>(this) + VRegsOffset();
+ int32_t* low_half = &reinterpret_cast<int32_t*>(vregs)[i];
+ *reinterpret_cast<int64_t*>(low_half) = val;
+ }
+
+ void SetVRegDouble(size_t i, double val) {
+ int8_t* vregs = reinterpret_cast<int8_t*>(this) + VRegsOffset();
+ int32_t* low_half = &reinterpret_cast<int32_t*>(vregs)[i];
+ *reinterpret_cast<double*>(low_half) = val;
+ }
+
+ void SetReferenceAndVReg(size_t i, Object* val) {
+ SetReference(i, val);
+ SetVReg(i, reinterpret_cast<int32_t>(val));
+ }
+
AbstractMethod* GetMethod() const {
DCHECK_NE(method_, static_cast<void*>(NULL));
return method_;
@@ -126,22 +187,31 @@
return OFFSETOF_MEMBER(ShadowFrame, references_);
}
- size_t VRegsOffset() {
+ size_t VRegsOffset() const {
return ReferencesOffset() + (sizeof(Object*) * NumberOfReferences());
}
private:
- // ShadowFrame should be allocated by the generated code directly.
- // We should not create new shadow stack in the runtime support function.
- ~ShadowFrame() {}
+ ShadowFrame(uint16_t num_refs, uint16_t num_vregs, ShadowFrame* link, AbstractMethod* method,
+ uint32_t dex_pc)
+ : number_of_references_(num_refs), number_of_vregs_(num_vregs), link_(link),
+ method_(method), dex_pc_(dex_pc) {
+ for (size_t i = 0; i < num_refs; ++i) {
+ SetReference(i, NULL);
+ }
+ for (size_t i = 0; i < num_vregs; ++i) {
+ SetVReg(i, 0);
+ }
+ }
+ // TODO: make the majority of these fields const.
uint16_t number_of_references_;
uint16_t number_of_vregs_;
// Link to previous shadow frame or NULL.
ShadowFrame* link_;
AbstractMethod* method_;
uint32_t dex_pc_;
- Object* references_[];
+ Object* references_[0];
DISALLOW_IMPLICIT_CONSTRUCTORS(ShadowFrame);
};
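
ShadowFrame::Create above sizes one allocation to cover the fixed header plus the trailing
reference and vreg slots, then constructs the frame in place over that buffer; the accessors
locate the vreg area with VRegsOffset(). A standalone sketch of the same placement-new pattern,
with illustrative names that are not part of the patch:

  #include <cstddef>
  #include <cstdint>
  #include <cstring>
  #include <new>

  struct Frame {
    // Allocate header + trailing storage in a single buffer and construct in place.
    static Frame* Create(uint16_t num_refs, uint16_t num_vregs) {
      size_t sz = sizeof(Frame) + sizeof(void*) * num_refs + sizeof(uint32_t) * num_vregs;
      uint8_t* memory = new uint8_t[sz];
      return new (memory) Frame(num_refs, num_vregs);
    }
    // Callers release the frame by destroying it and deleting the underlying byte buffer:
    //   frame->~Frame(); delete[] reinterpret_cast<uint8_t*>(frame);
   private:
    Frame(uint16_t num_refs, uint16_t num_vregs)
        : num_refs_(num_refs), num_vregs_(num_vregs) {
      // Zero the trailing slots, mirroring the ShadowFrame constructor's loops.
      memset(reinterpret_cast<uint8_t*>(this) + sizeof(Frame), 0,
             sizeof(void*) * num_refs + sizeof(uint32_t) * num_vregs);
    }
    uint16_t num_refs_;
    uint16_t num_vregs_;
    void* refs_[0];  // trailing storage starts here, as with references_[0] above
  };
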
diff --git a/src/thread.cc b/src/thread.cc
index 67773a5..2f8a9a7 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -292,9 +292,6 @@
SetUpAlternateSignalStack();
InitCpu();
InitFunctionPointers();
-#ifdef ART_USE_GREENLAND_COMPILER
- InitRuntimeEntryPoints(&runtime_entry_points_);
-#endif
InitCardTable();
InitTid();
@@ -555,7 +552,7 @@
}
}
-bool Thread::RequestCheckpoint(CheckpointFunction* function) {
+bool Thread::RequestCheckpoint(Closure* function) {
CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
checkpoint_function_ = function;
union StateAndFlags old_state_and_flags = state_and_flags_;
diff --git a/src/thread.h b/src/thread.h
index abfd719..798e96a 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -25,6 +25,7 @@
#include <string>
#include <vector>
+#include "closure.h"
#include "globals.h"
#include "macros.h"
#include "oat/runtime/oat_support_entrypoints.h"
@@ -35,9 +36,6 @@
#include "stack_indirect_reference_table.h"
#include "trace.h"
#include "UniquePtr.h"
-#ifdef ART_USE_GREENLAND_COMPILER
-#include "greenland/runtime_entry_points.h"
-#endif
namespace art {
@@ -106,12 +104,6 @@
class PACKED Thread {
public:
- class CheckpointFunction {
- public:
- virtual ~CheckpointFunction() { }
- virtual void Run(Thread* self) = 0;
- };
-
// Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
static const size_t kStackOverflowReservedBytes = 4 * KB;
@@ -183,7 +175,7 @@
void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
- bool RequestCheckpoint(CheckpointFunction* function);
+ bool RequestCheckpoint(Closure* function);
// Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
// mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
@@ -776,15 +768,12 @@
const char* last_no_thread_suspension_cause_;
// Pending checkpoint functions.
- CheckpointFunction* checkpoint_function_;
+ Closure* checkpoint_function_;
public:
// Runtime support function pointers
// TODO: move this near the top, since changing its offset requires all oats to be recompiled!
EntryPoints entrypoints_;
-#ifdef ART_USE_GREENLAND_COMPILER
- RuntimeEntryPoints runtime_entry_points_;
-#endif
private:
// How many times has our pthread key's destructor been called?
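
RequestCheckpoint() and the checkpoint_function_ field now use the generic Closure interface from
the new closure.h instead of the nested Thread::CheckpointFunction class, so checkpoints and
thread-pool tasks share one callback type. A sketch of a checkpoint written against that
interface; the subclass is hypothetical, and Closure is assumed to expose the virtual Run(Thread*)
used elsewhere in this patch:

  // Hypothetical checkpoint closure: counts the threads that ran it.
  class CountRunsClosure : public Closure {
   public:
    explicit CountRunsClosure(AtomicInteger* count) : count_(count) {}
    virtual void Run(Thread* /* self */) {
      ++*count_;
    }
   private:
    AtomicInteger* const count_;
  };
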
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 4ad25ae..4b2e17f 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -151,7 +151,7 @@
}
#endif
-size_t ThreadList::RunCheckpoint(Thread::CheckpointFunction* checkpoint_function) {
+size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
Thread* self = Thread::Current();
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertNotHeld(self);
diff --git a/src/thread_list.h b/src/thread_list.h
index a41fa57..d64183b 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -57,7 +57,7 @@
// Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside
// of the suspend check. Returns how many checkpoints we should expect to run.
- size_t RunCheckpoint(Thread::CheckpointFunction* checkpoint_function);
+ size_t RunCheckpoint(Closure* checkpoint_function);
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/src/thread_pool.cc b/src/thread_pool.cc
new file mode 100644
index 0000000..fa0cf79
--- /dev/null
+++ b/src/thread_pool.cc
@@ -0,0 +1,124 @@
+#include "runtime.h"
+#include "stl_util.h"
+#include "thread.h"
+#include "thread_pool.h"
+
+namespace art {
+
+ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name,
+ size_t stack_size)
+ : thread_pool_(thread_pool),
+ name_(name),
+ stack_size_(stack_size) {
+ const char* reason = "new thread pool worker thread";
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), reason);
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread_, &attr, &Callback, this), reason);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), reason);
+}
+
+ThreadPoolWorker::~ThreadPoolWorker() {
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+}
+
+void ThreadPoolWorker::Run() {
+ Thread* self = Thread::Current();
+ Closure* task = NULL;
+ while ((task = thread_pool_->GetTask(self)) != NULL) {
+ task->Run(self);
+ }
+}
+
+void* ThreadPoolWorker::Callback(void* arg) {
+ ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
+ Runtime* runtime = Runtime::Current();
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, NULL));
+ // Do work until it's time to shut down.
+ worker->Run();
+ runtime->DetachCurrentThread();
+ return NULL;
+}
+
+void ThreadPool::AddTask(Thread* self, Closure* task) {
+ MutexLock mu(self, task_queue_lock_);
+ tasks_.push_back(task);
+ // If we have any waiters, signal one.
+ if (waiting_count_ != 0) {
+ task_queue_condition_.Signal(self);
+ }
+}
+
+void ThreadPool::AddThread(size_t stack_size) {
+ threads_.push_back(
+ new ThreadPoolWorker(
+ this,
+ StringPrintf("Thread pool worker %d", static_cast<int>(GetThreadCount())),
+ stack_size));
+}
+
+ThreadPool::ThreadPool(size_t num_threads)
+ : task_queue_lock_("task queue lock"),
+ task_queue_condition_("task queue condition", task_queue_lock_),
+ completion_condition_("task completion condition", task_queue_lock_),
+ started_(false),
+ shutting_down_(false),
+ waiting_count_(0) {
+ while (GetThreadCount() < num_threads) {
+ AddThread(ThreadPoolWorker::kDefaultStackSize);
+ }
+}
+
+ThreadPool::~ThreadPool() {
+ // Tell any remaining workers to shut down.
+ shutting_down_ = true;
+ android_memory_barrier();
+ // Broadcast to everyone waiting.
+ task_queue_condition_.Broadcast(Thread::Current());
+ // Wait for the threads to finish.
+ STLDeleteElements(&threads_);
+}
+
+void ThreadPool::StartWorkers(Thread* self) {
+ MutexLock mu(self, task_queue_lock_);
+ started_ = true;
+ android_memory_barrier();
+ task_queue_condition_.Broadcast(self);
+}
+
+void ThreadPool::StopWorkers(Thread* self) {
+ MutexLock mu(self, task_queue_lock_);
+ started_ = false;
+ android_memory_barrier();
+}
+
+Closure* ThreadPool::GetTask(Thread* self) {
+ MutexLock mu(self, task_queue_lock_);
+ while (!shutting_down_) {
+ if (started_ && !tasks_.empty()) {
+ Closure* task = tasks_.front();
+ tasks_.pop_front();
+ return task;
+ }
+
+ waiting_count_++;
+ if (waiting_count_ == GetThreadCount() && tasks_.empty()) {
+ // We may be done; let's broadcast on the completion condition.
+ completion_condition_.Broadcast(self);
+ }
+ task_queue_condition_.Wait(self);
+ waiting_count_--;
+ }
+
+ // We are shutting down; return NULL to tell the worker thread to stop looping.
+ return NULL;
+}
+
+void ThreadPool::Wait(Thread* self) {
+ MutexLock mu(self, task_queue_lock_);
+ // Wait until each thread is waiting and the task list is empty.
+ while (waiting_count_ != GetThreadCount() || !tasks_.empty()) {
+ completion_condition_.Wait(self);
+ }
+}
+
+} // namespace art
diff --git a/src/thread_pool.h b/src/thread_pool.h
new file mode 100644
index 0000000..22e30b7
--- /dev/null
+++ b/src/thread_pool.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_THREAD_POOL_H_
+#define ART_SRC_THREAD_POOL_H_
+
+#include <deque>
+#include <vector>
+
+#include "locks.h"
+#include "../src/mutex.h"
+
+namespace art {
+
+class Closure;
+class ThreadPool;
+
+class ThreadPoolWorker {
+ public:
+ static const size_t kDefaultStackSize = 1 * MB;
+
+ size_t GetStackSize() const {
+ return stack_size_;
+ }
+
+ virtual ~ThreadPoolWorker();
+
+ private:
+ ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
+ static void* Callback(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void Run();
+
+ ThreadPool* thread_pool_;
+ const std::string name_;
+ const size_t stack_size_;
+ pthread_t pthread_;
+ pthread_attr_t attr;
+
+ friend class ThreadPool;
+ DISALLOW_COPY_AND_ASSIGN(ThreadPoolWorker);
+};
+
+class ThreadPool {
+ public:
+ // Returns the number of threads in the thread pool.
+ size_t GetThreadCount() const {
+ return threads_.size();
+ }
+
+ // Broadcast to the workers and tell them to empty out the work queue.
+ void StartWorkers(Thread* self);
+
+ // Do not allow workers to grab any new tasks.
+ void StopWorkers(Thread* self);
+
+ // Add a new task; the first available started worker will process it. The pool does not delete
+ // the task after running it; deletion is the caller's responsibility.
+ void AddTask(Thread* self, Closure* task);
+
+ ThreadPool(size_t num_threads);
+ virtual ~ThreadPool();
+
+ // Wait for all currently queued tasks to complete.
+ void Wait(Thread* self);
+
+ private:
+ // Add a new worker thread to the pool.
+ void AddThread(size_t stack_size);
+
+ // Get a task to run; blocks until a task is available or the pool is shutting down.
+ Closure* GetTask(Thread* self);
+
+ Mutex task_queue_lock_;
+ ConditionVariable task_queue_condition_ GUARDED_BY(task_queue_lock_);
+ ConditionVariable completion_condition_ GUARDED_BY(task_queue_lock_);
+ volatile bool started_ GUARDED_BY(task_queue_lock_);
+ volatile bool shutting_down_ GUARDED_BY(task_queue_lock_);
+ // How many worker threads are waiting on the condition.
+ volatile size_t waiting_count_ GUARDED_BY(task_queue_lock_);
+ std::deque<Closure*> tasks_ GUARDED_BY(task_queue_lock_);
+ // TODO: make this immutable/const?
+ std::vector<ThreadPoolWorker*> threads_;
+
+ friend class ThreadPoolWorker;
+ DISALLOW_COPY_AND_ASSIGN(ThreadPool);
+};
+
+} // namespace art
+
+#endif // ART_SRC_THREAD_POOL_H_
diff --git a/src/thread_pool_test.cc b/src/thread_pool_test.cc
new file mode 100644
index 0000000..783f786
--- /dev/null
+++ b/src/thread_pool_test.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <string>
+
+#include "atomic_integer.h"
+#include "common_test.h"
+#include "thread_pool.h"
+
+namespace art {
+
+class CountClosure : public Closure {
+ public:
+ CountClosure(AtomicInteger* count) : count_(count) {
+
+ }
+
+ void Run(Thread* /* self */) {
+ // Simulate doing some work.
+ usleep(100);
+ // Increment the counter which keeps track of work completed.
+ ++*count_;
+ delete this;
+ }
+
+ private:
+ AtomicInteger* const count_;
+};
+
+class ThreadPoolTest : public CommonTest {
+ public:
+ static int32_t num_threads;
+};
+
+int32_t ThreadPoolTest::num_threads = 4;
+
+// Check that the thread pool actually runs tasks that you assign it.
+TEST_F(ThreadPoolTest, CheckRun) {
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool(num_threads);
+ AtomicInteger count = 0;
+ static const int32_t num_tasks = num_threads * 4;
+ for (int32_t i = 0; i < num_tasks; ++i) {
+ thread_pool.AddTask(self, new CountClosure(&count));
+ }
+ thread_pool.StartWorkers(self);
+ // Wait for tasks to complete.
+ thread_pool.Wait(self);
+ // Make sure that we finished all the work.
+ EXPECT_EQ(num_tasks, count);
+}
+
+TEST_F(ThreadPoolTest, StopStart) {
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool(num_threads);
+ AtomicInteger count = 0;
+ static const int32_t num_tasks = num_threads * 4;
+ for (int32_t i = 0; i < num_tasks; ++i) {
+ thread_pool.AddTask(self, new CountClosure(&count));
+ }
+ usleep(200);
+ // Check that no tasks were run before the workers were started.
+ EXPECT_EQ(0, count);
+ // Signal the threads to start processing tasks.
+ thread_pool.StartWorkers(self);
+ usleep(200);
+ thread_pool.StopWorkers(self);
+ AtomicInteger bad_count = 0;
+ thread_pool.AddTask(self, new CountClosure(&bad_count));
+ usleep(200);
+ // Ensure that the task added after the workers were stopped doesn't get run.
+ EXPECT_EQ(0, bad_count);
+}
+
+class TreeClosure : public Closure {
+ public:
+ TreeClosure(ThreadPool* const thread_pool, AtomicInteger* count, int depth)
+ : thread_pool_(thread_pool),
+ count_(count),
+ depth_(depth) {
+
+ }
+
+ void Run(Thread* self) {
+ if (depth_ > 1) {
+ thread_pool_->AddTask(self, new TreeClosure(thread_pool_, count_, depth_ - 1));
+ thread_pool_->AddTask(self, new TreeClosure(thread_pool_, count_, depth_ - 1));
+ }
+ // Increment the counter which keeps track of work completed.
+ ++*count_;
+ delete this;
+ }
+
+ private:
+ ThreadPool* const thread_pool_;
+ AtomicInteger* const count_;
+ const int depth_;
+};
+
+// Test that adding new tasks from within a task works; a TreeClosure of depth d runs 2^d - 1 tasks in total.
+TEST_F(ThreadPoolTest, RecursiveTest) {
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool(num_threads);
+ AtomicInteger count = 0;
+ static const int depth = 8;
+ thread_pool.AddTask(self, new TreeClosure(&thread_pool, &count, depth));
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self);
+ EXPECT_EQ((1 << depth) - 1, count);
+}
+
+} // namespace art
diff --git a/src/trace.cc b/src/trace.cc
index d0132e1..753b80f 100644
--- a/src/trace.cc
+++ b/src/trace.cc
@@ -241,6 +241,7 @@
void Trace::SaveAndUpdateCode(AbstractMethod* method) {
#if defined(ART_USE_LLVM_COMPILER)
+ UNUSED(method);
UNIMPLEMENTED(FATAL);
#else
void* trace_stub = GetLogTraceEntryPoint();
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index 67507bc..9a933bf 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -32,7 +32,7 @@
#include "runtime.h"
#include "stringpiece.h"
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
#include "greenland/backend_types.h"
#include "greenland/inferred_reg_category_map.h"
#endif
@@ -962,7 +962,7 @@
return true;
}
-#if !defined(ART_USE_LLVM_COMPILER) && !defined(ART_USE_GREENLAND_COMPILER)
+#if !defined(ART_USE_LLVM_COMPILER)
static const std::vector<uint8_t>* CreateLengthPrefixedDexGcMap(const std::vector<uint8_t>& gc_map) {
std::vector<uint8_t>* length_prefixed_gc_map = new std::vector<uint8_t>;
length_prefixed_gc_map->push_back((gc_map.size() & 0xff000000) >> 24);
@@ -1012,7 +1012,7 @@
Compiler::MethodReference ref(dex_file_, method_idx_);
-#if !defined(ART_USE_LLVM_COMPILER) && !defined(ART_USE_GREENLAND_COMPILER)
+#if !defined(ART_USE_LLVM_COMPILER)
/* Generate a register map and add it to the method. */
UniquePtr<const std::vector<uint8_t> > map(GenerateGcMap());
@@ -1026,7 +1026,7 @@
const std::vector<uint8_t>* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get()));
verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
-#else // defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#else // defined(ART_USE_LLVM_COMPILER)
/* Generate Inferred Register Category for LLVM-based Code Generator */
const InferredRegCategoryMap* table = GenerateInferredRegCategoryMap();
verifier::MethodVerifier::SetInferredRegCategoryMap(ref, *table);
@@ -1501,15 +1501,23 @@
break;
case Instruction::CONST_4:
+ /* could be boolean, int, float, or a null reference */
+ work_line_->SetRegisterType(dec_insn.vA,
+ reg_types_.FromCat1Const((dec_insn.vB << 28) >> 28));
+ break;
case Instruction::CONST_16:
+ /* could be boolean, int, float, or a null reference */
+ work_line_->SetRegisterType(dec_insn.vA,
+ reg_types_.FromCat1Const(static_cast<int16_t>(dec_insn.vB)));
+ break;
case Instruction::CONST:
/* could be boolean, int, float, or a null reference */
- work_line_->SetRegisterType(dec_insn.vA, reg_types_.FromCat1Const((int32_t) dec_insn.vB));
+ work_line_->SetRegisterType(dec_insn.vA, reg_types_.FromCat1Const(dec_insn.vB));
break;
case Instruction::CONST_HIGH16:
/* could be boolean, int, float, or a null reference */
work_line_->SetRegisterType(dec_insn.vA,
- reg_types_.FromCat1Const((int32_t) dec_insn.vB << 16));
+ reg_types_.FromCat1Const(dec_insn.vB << 16));
break;
case Instruction::CONST_WIDE_16:
case Instruction::CONST_WIDE_32:
@@ -3262,7 +3270,7 @@
Mutex* MethodVerifier::rejected_classes_lock_ = NULL;
MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL;
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
Mutex* MethodVerifier::inferred_reg_category_maps_lock_ = NULL;
MethodVerifier::InferredRegCategoryMapTable* MethodVerifier::inferred_reg_category_maps_ = NULL;
#endif
@@ -3281,7 +3289,7 @@
rejected_classes_ = new MethodVerifier::RejectedClassesTable;
}
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
inferred_reg_category_maps_lock_ = new Mutex("verifier GC maps lock");
{
MutexLock mu(self, *inferred_reg_category_maps_lock_);
@@ -3309,7 +3317,7 @@
delete rejected_classes_lock_;
rejected_classes_lock_ = NULL;
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
{
MutexLock mu(self, *inferred_reg_category_maps_lock_);
STLDeleteValues(inferred_reg_category_maps_);
@@ -3334,7 +3342,7 @@
return (rejected_classes_->find(ref) != rejected_classes_->end());
}
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
const greenland::InferredRegCategoryMap* MethodVerifier::GenerateInferredRegCategoryMap() {
uint32_t insns_size = code_item_->insns_size_in_code_units_;
uint16_t regs_size = code_item_->registers_size_;
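
The CONST_4 and CONST_16 cases above now decode their immediates as signed values before handing
them to FromCat1Const: the 16-bit literal through a cast to int16_t, and the 4-bit nibble through
the shift pair (vB << 28) >> 28, which sign-extends when evaluated on a signed 32-bit value. A
small self-contained check of the nibble trick; the function name is illustrative and, like the
verifier code, it relies on the usual two's-complement arithmetic right shift:

  #include <cassert>
  #include <cstdint>

  // Sign-extend the low 4 bits of a value, as the CONST_4 case intends.
  int32_t SignExtendNibble(uint32_t v) {
    return static_cast<int32_t>(v << 28) >> 28;
  }

  int main() {
    assert(SignExtendNibble(0x7) == 7);   // positive nibble unchanged
    assert(SignExtendNibble(0xF) == -1);  // 0b1111 becomes -1
    assert(SignExtendNibble(0x8) == -8);  // most negative 4-bit value
    return 0;
  }
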
diff --git a/src/verifier/method_verifier.h b/src/verifier/method_verifier.h
index 6765c10..42283a2 100644
--- a/src/verifier/method_verifier.h
+++ b/src/verifier/method_verifier.h
@@ -39,7 +39,7 @@
struct ReferenceMap2Visitor;
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
namespace greenland {
class InferredRegCategoryMap;
} // namespace greenland
@@ -143,7 +143,7 @@
// The verifier
class MethodVerifier {
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
typedef greenland::InferredRegCategoryMap InferredRegCategoryMap;
#endif
@@ -203,7 +203,7 @@
static void Init();
static void Shutdown();
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
static const InferredRegCategoryMap* GetInferredRegCategoryMap(Compiler::MethodReference ref)
LOCKS_EXCLUDED(inferred_reg_category_maps_lock_);
#endif
@@ -554,7 +554,7 @@
// Get a type representing the declaring class of the method.
const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
/*
* Generate the inferred register category for LLVM-based code generator.
* Returns a pointer to a two-dimension Class array, or NULL on failure.
@@ -589,7 +589,7 @@
static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static RejectedClassesTable* rejected_classes_;
-#if defined(ART_USE_LLVM_COMPILER) || defined(ART_USE_GREENLAND_COMPILER)
+#if defined(ART_USE_LLVM_COMPILER)
// All the inferred register category maps that the verifier has created.
typedef SafeMap<const Compiler::MethodReference,
const InferredRegCategoryMap*> InferredRegCategoryMapTable;