Merge "Increase dex2oat timeout for run-tests"
diff --git a/build/Android.bp b/build/Android.bp
index 8e8a2f6..ff762dd 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -83,7 +83,7 @@
"bionic/libc/private",
],
},
- linux: {
+ linux_glibc: {
cflags: [
// Enable missing-noreturn only on non-Mac. As lots of things are not implemented for
// Apple, it's a pain.
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index f4f8d49..3247e54 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -84,12 +84,6 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
-# Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
-HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
-TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
-TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
-
ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 1ae79ac..37e6d42 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -202,9 +202,9 @@
# $(5): a make variable used to collate target dependencies, e.g. ART_TEST_TARGET_OAT_HelloWorld_DEX
# $(6): a make variable used to collate host dependencies, e.g. ART_TEST_HOST_OAT_HelloWorld_DEX
#
-# If the input test directory contains a file called main.list and main.jpp,
+# If the input test directory contains a file called main.list,
# then a multi-dex file is created passing main.list as the --main-dex-list
-# argument to dx and main.jpp for Jack.
+# argument to dx.
define build-art-test-dex
ifeq ($(ART_BUILD_TARGET),true)
include $(CLEAR_VARS)
@@ -219,7 +219,6 @@
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_JAVA_LIBRARY)
$(5) := $$(LOCAL_INSTALLED_MODULE)
@@ -235,7 +234,6 @@
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
- LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
$(6) := $$(LOCAL_INSTALLED_MODULE)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 571c91a..0f92a25 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -604,7 +604,7 @@
endif
.PHONY: $$(rule_name)
-$$(rule_name): $$(dependencies)
+$$(rule_name): $$(dependencies) dx d8
$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
# Clear locally defined variables.
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 18ca944..c5d3a6b 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -78,6 +78,7 @@
*filename = cache_filename;
return true;
} else {
+ *filename = system_filename;
return false;
}
}
@@ -217,7 +218,7 @@
// Specified by --boot-image.
const char* boot_image_location_ = nullptr;
// Specified by --instruction-set.
- InstructionSet instruction_set_ = kRuntimeISA;
+ InstructionSet instruction_set_ = InstructionSet::kNone;
// Specified by --output.
std::ostream* os_ = &std::cout;
std::unique_ptr<std::ofstream> out_; // If something besides cout is used
@@ -230,6 +231,10 @@
*error_msg = "--boot-image must be specified";
return false;
}
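+  // If --instruction-set was not given, fall back to the runtime ISA with a
+  // warning rather than an error.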
+ if (instruction_set_ == InstructionSet::kNone) {
+ LOG(WARNING) << "No instruction set given, assuming " << GetInstructionSetString(kRuntimeISA);
+ instruction_set_ = kRuntimeISA;
+ }
DBG_LOG << "boot image location: " << boot_image_location_;
@@ -266,8 +271,10 @@
// Check that the boot image location points to a valid file name.
std::string file_name;
if (!LocationToFilename(boot_image_location, instruction_set_, &file_name)) {
- *error_msg = android::base::StringPrintf("No corresponding file for location '%s' exists",
- boot_image_location.c_str());
+ *error_msg = android::base::StringPrintf(
+ "No corresponding file for location '%s' (filename '%s') exists",
+ boot_image_location.c_str(),
+ file_name.c_str());
return false;
}
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 521156a..87bf1c4 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -290,26 +290,42 @@
static const char* Name() { return "double"; }
};
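+// Shared helper for the integral CmdlineType specializations below: parses a
+// base-10 value into T via strtoll, rejecting empty input, trailing
+// characters, and values outside T's range.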
+template <typename T>
+static inline CmdlineParseResult<T> ParseNumeric(const std::string& str) {
+ static_assert(sizeof(T) < sizeof(long long int), // NOLINT [runtime/int] [4]
+ "Current support is restricted.");
+
+ const char* begin = str.c_str();
+ char* end;
+
+ // Parse into a larger type (long long) because we can't use strtoul
+ // since it silently converts negative values into unsigned long and doesn't set errno.
+ errno = 0;
+ long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4]
+ if (begin == end || *end != '\0' || errno == EINVAL) {
+ return CmdlineParseResult<T>::Failure("Failed to parse integer from " + str);
+ } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4]
+ result < std::numeric_limits<T>::min() || result > std::numeric_limits<T>::max()) {
+ return CmdlineParseResult<T>::OutOfRange(
+ "Failed to parse integer from " + str + "; out of range");
+ }
+
+ return CmdlineParseResult<T>::Success(static_cast<T>(result));
+}
+
template <>
struct CmdlineType<unsigned int> : CmdlineTypeParser<unsigned int> {
Result Parse(const std::string& str) {
- const char* begin = str.c_str();
- char* end;
+ return ParseNumeric<unsigned int>(str);
+ }
- // Parse into a larger type (long long) because we can't use strtoul
- // since it silently converts negative values into unsigned long and doesn't set errno.
- errno = 0;
- long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4]
- if (begin == end || *end != '\0' || errno == EINVAL) {
- return Result::Failure("Failed to parse integer from " + str);
- } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4]
- result < std::numeric_limits<int>::min()
- || result > std::numeric_limits<unsigned int>::max() || result < 0) {
- return Result::OutOfRange(
- "Failed to parse integer from " + str + "; out of unsigned int range");
- }
+ static const char* Name() { return "unsigned integer"; }
+};
- return Result::Success(static_cast<unsigned int>(result));
+template <>
+struct CmdlineType<int> : CmdlineTypeParser<int> {
+ Result Parse(const std::string& str) {
+ return ParseNumeric<int>(str);
}
-  static const char* Name() { return "unsigned integer"; }
+  static const char* Name() { return "integer"; }
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 3044890..1e4cdf2 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -135,6 +135,7 @@
"linker/mips/relative_patcher_mips.cc",
"optimizing/code_generator_mips.cc",
"optimizing/code_generator_vector_mips.cc",
+ "optimizing/instruction_simplifier_mips.cc",
"optimizing/intrinsics_mips.cc",
"optimizing/pc_relative_fixups_mips.cc",
"utils/mips/assembler_mips.cc",
@@ -192,6 +193,10 @@
"liblzma",
],
include_dirs: ["art/disassembler"],
+ header_libs: [
+ "art_cmdlineparser_headers", // For compiler_options.
+ ],
+
export_include_dirs: ["."],
}
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 538845d..b6cedff 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -18,7 +18,13 @@
#include <fstream>
+#include "android-base/stringprintf.h"
+
+#include "base/variant_map.h"
+#include "cmdline_parser.h"
+#include "compiler_options_map-inl.h"
#include "runtime.h"
+#include "simple_compiler_options_map.h"
namespace art {
@@ -71,115 +77,50 @@
(kIsTargetBuild || IsCoreImage() || Runtime::Current()->UseJitCompilation());
}
-void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage);
-}
-
-void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage);
-}
-
-void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
-}
-
-void CompilerOptions::ParseDumpInitFailures(const StringPiece& option,
- UsageFn Usage ATTRIBUTE_UNUSED) {
- DCHECK(option.starts_with("--dump-init-failures="));
- std::string file_name = option.substr(strlen("--dump-init-failures=")).data();
- init_failure_output_.reset(new std::ofstream(file_name));
+bool CompilerOptions::ParseDumpInitFailures(const std::string& option, std::string* error_msg) {
+ init_failure_output_.reset(new std::ofstream(option));
if (init_failure_output_.get() == nullptr) {
- LOG(ERROR) << "Failed to allocate ofstream";
+ *error_msg = "Failed to construct std::ofstream";
+ return false;
} else if (init_failure_output_->fail()) {
- LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization "
- << "failures.";
+ *error_msg = android::base::StringPrintf(
+ "Failed to open %s for writing the initialization failures.", option.c_str());
init_failure_output_.reset();
- }
-}
-
-void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option,
- UsageFn Usage) {
- DCHECK(option.starts_with("--register-allocation-strategy="));
- StringPiece choice = option.substr(strlen("--register-allocation-strategy=")).data();
- if (choice == "linear-scan") {
- register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
- } else if (choice == "graph-color") {
- register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
- } else {
- Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color.");
- }
-}
-
-bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) {
- if (option.starts_with("--compiler-filter=")) {
- const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
- if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, &compiler_filter_)) {
- Usage("Unknown --compiler-filter value %s", compiler_filter_string);
- }
- } else if (option == "--compile-pic") {
- compile_pic_ = true;
- } else if (option.starts_with("--huge-method-max=")) {
- ParseHugeMethodMax(option, Usage);
- } else if (option.starts_with("--large-method-max=")) {
- ParseLargeMethodMax(option, Usage);
- } else if (option.starts_with("--small-method-max=")) {
- ParseSmallMethodMax(option, Usage);
- } else if (option.starts_with("--tiny-method-max=")) {
- ParseTinyMethodMax(option, Usage);
- } else if (option.starts_with("--num-dex-methods=")) {
- ParseNumDexMethods(option, Usage);
- } else if (option.starts_with("--inline-max-code-units=")) {
- ParseInlineMaxCodeUnits(option, Usage);
- } else if (option == "--generate-debug-info" || option == "-g") {
- generate_debug_info_ = true;
- } else if (option == "--no-generate-debug-info") {
- generate_debug_info_ = false;
- } else if (option == "--generate-mini-debug-info") {
- generate_mini_debug_info_ = true;
- } else if (option == "--no-generate-mini-debug-info") {
- generate_mini_debug_info_ = false;
- } else if (option == "--generate-build-id") {
- generate_build_id_ = true;
- } else if (option == "--no-generate-build-id") {
- generate_build_id_ = false;
- } else if (option == "--debuggable") {
- debuggable_ = true;
- } else if (option.starts_with("--top-k-profile-threshold=")) {
- ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
- } else if (option == "--abort-on-hard-verifier-error") {
- abort_on_hard_verifier_failure_ = true;
- } else if (option == "--no-abort-on-hard-verifier-error") {
- abort_on_hard_verifier_failure_ = false;
- } else if (option.starts_with("--dump-init-failures=")) {
- ParseDumpInitFailures(option, Usage);
- } else if (option.starts_with("--dump-cfg=")) {
- dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
- } else if (option == "--dump-cfg-append") {
- dump_cfg_append_ = true;
- } else if (option.starts_with("--register-allocation-strategy=")) {
- ParseRegisterAllocationStrategy(option, Usage);
- } else if (option.starts_with("--verbose-methods=")) {
- // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
- // conditional on having verbose methods.
- gLogVerbosity.compiler = false;
- Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
- } else {
- // Option not recognized.
return false;
}
return true;
}
+bool CompilerOptions::ParseRegisterAllocationStrategy(const std::string& option,
+ std::string* error_msg) {
+ if (option == "linear-scan") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
+ } else if (option == "graph-color") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
+ } else {
+ *error_msg = "Unrecognized register allocation strategy. Try linear-scan, or graph-color.";
+ return false;
+ }
+ return true;
+}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
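+// The inlined parser machinery below produces a large stack frame, hence the
+// suppressed -Wframe-larger-than= warning around this function.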
+bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& options,
+ bool ignore_unrecognized,
+ std::string* error_msg) {
+ auto parser = CreateSimpleParser(ignore_unrecognized);
+ CmdlineResult parse_result = parser.Parse(options);
+ if (!parse_result.IsSuccess()) {
+ *error_msg = parse_result.GetMessage();
+ return false;
+ }
+
+ SimpleParseArgumentMap args = parser.ReleaseArgumentsMap();
+ return ReadCompilerOptions(args, this, error_msg);
+}
+
+#pragma GCC diagnostic pop
+
} // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index a9372c4..311dbd5 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -231,7 +231,9 @@
return no_inline_from_;
}
- bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+ bool ParseCompilerOptions(const std::vector<std::string>& options,
+ bool ignore_unrecognized,
+ std::string* error_msg);
void SetNonPic() {
compile_pic_ = false;
@@ -258,7 +260,7 @@
}
private:
- void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
+ bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
-  void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
-  void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
-  void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
@@ -266,7 +268,7 @@
-  void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
-  void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
-  void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
- void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage);
+ bool ParseRegisterAllocationStrategy(const std::string& option, std::string* error_msg);
CompilerFilter::Filter compiler_filter_;
size_t huge_method_threshold_;
@@ -327,6 +329,9 @@
friend class CommonCompilerTest;
friend class verifier::VerifierDepsTest;
+ template <class Base>
+ friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg);
+
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
new file mode 100644
index 0000000..9cb818a
--- /dev/null
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
+
+#include "compiler_options_map.h"
+
+#include <memory>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/stringprintf.h"
+
+#include "base/macros.h"
+#include "cmdline_parser.h"
+#include "compiler_options.h"
+
+namespace art {
+
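+// Transfers every value present in |map| into |options|. Options that need
+// extra validation (compiler filter, init-failure file, register allocation
+// strategy) are checked here; returns false and sets |error_msg| on failure.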
+template <class Base>
+inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg) {
+ if (map.Exists(Base::CompilerFilter)) {
+ CompilerFilter::Filter compiler_filter;
+ if (!CompilerFilter::ParseCompilerFilter(map.Get(Base::CompilerFilter)->c_str(),
+ &compiler_filter)) {
+ *error_msg = android::base::StringPrintf("Unknown --compiler-filter value %s",
+ map.Get(Base::CompilerFilter)->c_str());
+ return false;
+ }
+ options->SetCompilerFilter(compiler_filter);
+ }
+ if (map.Exists(Base::PIC)) {
+ options->compile_pic_ = true;
+ }
+ map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
+ map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
+ map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
+ map.AssignIfExists(Base::TinyMethodMaxThreshold, &options->tiny_method_threshold_);
+ map.AssignIfExists(Base::NumDexMethodsThreshold, &options->num_dex_methods_threshold_);
+ map.AssignIfExists(Base::InlineMaxCodeUnitsThreshold, &options->inline_max_code_units_);
+ map.AssignIfExists(Base::GenerateDebugInfo, &options->generate_debug_info_);
+ map.AssignIfExists(Base::GenerateMiniDebugInfo, &options->generate_mini_debug_info_);
+ map.AssignIfExists(Base::GenerateBuildID, &options->generate_build_id_);
+ if (map.Exists(Base::Debuggable)) {
+ options->debuggable_ = true;
+ }
+ map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
+ map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
+ if (map.Exists(Base::DumpInitFailures)) {
+ if (!options->ParseDumpInitFailures(*map.Get(Base::DumpInitFailures), error_msg)) {
+ return false;
+ }
+ }
+ map.AssignIfExists(Base::DumpCFG, &options->dump_cfg_file_name_);
+ if (map.Exists(Base::DumpCFGAppend)) {
+ options->dump_cfg_append_ = true;
+ }
+ if (map.Exists(Base::RegisterAllocationStrategy)) {
+    if (!options->ParseRegisterAllocationStrategy(*map.Get(Base::RegisterAllocationStrategy),
+                                                  error_msg)) {
+ return false;
+ }
+ }
+ map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_);
+
+ return true;
+}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
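+// Adds a parse rule for each recognized compiler option to the parser
+// builder |b|; parsed values are stored under the keys of the map type |Map|.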
+template <typename Map, typename Builder>
+inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
+ b.
+ Define("--compiler-filter=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::CompilerFilter)
+
+ .Define("--compile-pic")
+ .IntoKey(Map::PIC)
+
+ .Define("--huge-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::HugeMethodMaxThreshold)
+ .Define("--large-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::LargeMethodMaxThreshold)
+ .Define("--small-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::SmallMethodMaxThreshold)
+ .Define("--tiny-method-max=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::TinyMethodMaxThreshold)
+ .Define("--num-dex-methods=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::NumDexMethodsThreshold)
+ .Define("--inline-max-code-units=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::InlineMaxCodeUnitsThreshold)
+
+ .Define({"--generate-debug-info", "-g", "--no-generate-debug-info"})
+ .WithValues({true, true, false})
+ .IntoKey(Map::GenerateDebugInfo)
+ .Define({"--generate-mini-debug-info", "--no-generate-mini-debug-info"})
+ .WithValues({true, false})
+ .IntoKey(Map::GenerateMiniDebugInfo)
+
+ .Define({"--generate-build-id", "--no-generate-build-id"})
+ .WithValues({true, false})
+ .IntoKey(Map::GenerateBuildID)
+
+ .Define("--debuggable")
+ .IntoKey(Map::Debuggable)
+
+ .Define("--top-k-profile-threshold=_")
+ .template WithType<double>().WithRange(0.0, 100.0)
+ .IntoKey(Map::TopKProfileThreshold)
+
+ .Define({"--abort-on-hard-verifier-error", "--no-abort-on-hard-verifier-error"})
+ .WithValues({true, false})
+ .IntoKey(Map::AbortOnHardVerifierFailure)
+
+ .Define("--dump-init-failures=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::DumpInitFailures)
+
+ .Define("--dump-cfg=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::DumpCFG)
+ .Define("--dump-cfg-append")
+ .IntoKey(Map::DumpCFGAppend)
+
+ .Define("--register-allocation-strategy=_")
+ .template WithType<std::string>()
+ .IntoKey(Map::RegisterAllocationStrategy)
+
+ .Define("--verbose-methods=_")
+ .template WithType<ParseStringList<','>>()
+ .IntoKey(Map::VerboseMethods);
+}
+
+#pragma GCC diagnostic pop
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_INL_H_
diff --git a/compiler/driver/compiler_options_map-storage.h b/compiler/driver/compiler_options_map-storage.h
new file mode 100644
index 0000000..756598d
--- /dev/null
+++ b/compiler/driver/compiler_options_map-storage.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+
+// Assumes:
+// * #include "compiler_options_map.h"
+// * namespace art
+//
+// Usage:
+// #define COMPILER_OPTIONS_MAP_TYPE TheTypeOfTheMap
+// #define COMPILER_OPTIONS_MAP_KEY_TYPE TheTypeOfTheMapsKey
+// #include "driver/compiler_options_map-storage.h
+
+#ifndef COMPILER_OPTIONS_MAP_TYPE
+#error "Expected COMPILER_OPTIONS_MAP_TYPE"
+#endif
+
+#ifndef COMPILER_OPTIONS_MAP_KEY_TYPE
+#error "Expected COMPILER_OPTIONS_MAP_KEY_TYPE"
+#endif
+
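+// Provides out-of-line storage for the static key members declared through
+// compiler_options_map.def, then explicitly instantiates the map template.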
+#define COMPILER_OPTIONS_KEY(Type, Name, ...) \
+ template <typename Base, template <typename TV> class KeyType> \
+ const KeyType<Type> CompilerOptionsMap<Base, KeyType>::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4]
+#include <driver/compiler_options_map.def>
+
+template struct CompilerOptionsMap<COMPILER_OPTIONS_MAP_TYPE, COMPILER_OPTIONS_MAP_KEY_TYPE>;
+
+#undef COMPILER_OPTIONS_MAP_TYPE
+#undef COMPILER_OPTIONS_MAP_KEY_TYPE
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_
+#undef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_STORAGE_H_ // Guard is only for cpplint
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
new file mode 100644
index 0000000..570bc5a
--- /dev/null
+++ b/compiler/driver/compiler_options_map.def
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPILER_OPTIONS_KEY
+#error "Please #define COMPILER_OPTIONS_KEY before #including this file"
+#define COMPILER_OPTIONS_KEY(...) // Don't display errors in this file in IDEs.
+#endif
+
+// This file defines the list of keys for CompilerOptionsMap.
+// These can be used with CompilerOptionsMap.Get/Set/etc, once that template class has been
+// instantiated.
+//
+// Column Descriptions:
+// <<Type>> <<Key Name>> (<<Default Value>>)
+//
+// Default values are only used by Map::GetOrDefault(K<T>).
+// If a default value is omitted here, T{} is used as the default value, which is
+// almost-always the value of the type as if it was memset to all 0.
+//
+// Please keep the columns aligned if possible when adding new rows.
+//
+
+// Parse-able keys from the command line.
+
+// TODO: Add type parser.
+COMPILER_OPTIONS_KEY (std::string, CompilerFilter)
+COMPILER_OPTIONS_KEY (Unit, PIC)
+COMPILER_OPTIONS_KEY (unsigned int, HugeMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, LargeMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, SmallMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, TinyMethodMaxThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, NumDexMethodsThreshold)
+COMPILER_OPTIONS_KEY (unsigned int, InlineMaxCodeUnitsThreshold)
+COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo)
+COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo)
+COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
+COMPILER_OPTIONS_KEY (Unit, Debuggable)
+COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
+COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
+COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
+COMPILER_OPTIONS_KEY (std::string, DumpCFG)
+COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
+// TODO: Add type parser.
+COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
+COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
+
+#undef COMPILER_OPTIONS_KEY
diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h
new file mode 100644
index 0000000..b9bc8b6
--- /dev/null
+++ b/compiler/driver/compiler_options_map.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
+#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
+
+#include <string>
+#include <vector>
+
+#include "base/variant_map.h"
+#include "cmdline_types.h"
+
+namespace art {
+
+// Defines a type-safe heterogeneous key->value map. This is to be used as the base for
+// an extended map.
+template <typename Base, template <typename TV> class KeyType>
+struct CompilerOptionsMap : VariantMap<Base, KeyType> {
+ // Make the next many usages of Key slightly shorter to type.
+ template <typename TValue>
+ using Key = KeyType<TValue>;
+
+ // List of key declarations, shorthand for 'static const Key<T> Name'
+#define COMPILER_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name);
+#include "compiler_options_map.def"
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_MAP_H_
diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h
new file mode 100644
index 0000000..3860da9
--- /dev/null
+++ b/compiler/driver/simple_compiler_options_map.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file declares a completion of the CompilerOptionsMap and should be included into a
+// .cc file, only.
+
+#ifndef ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
+#define ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
+
+#include <memory>
+
+#include "compiler_options_map-inl.h"
+#include "base/variant_map.h"
+
+namespace art {
+
+template <typename TValue>
+struct SimpleParseArgumentMapKey : VariantMapKey<TValue> {
+ SimpleParseArgumentMapKey() {}
+ explicit SimpleParseArgumentMapKey(TValue default_value)
+ : VariantMapKey<TValue>(std::move(default_value)) {}
+ // Don't ODR-use constexpr default values, which means that Struct::Fields
+ // that are declared 'static constexpr T Name = Value' don't need to have a matching definition.
+};
+
+struct SimpleParseArgumentMap : CompilerOptionsMap<SimpleParseArgumentMap,
+ SimpleParseArgumentMapKey> {
+ // This 'using' line is necessary to inherit the variadic constructor.
+ using CompilerOptionsMap<SimpleParseArgumentMap, SimpleParseArgumentMapKey>::CompilerOptionsMap;
+};
+
+#define COMPILER_OPTIONS_MAP_TYPE SimpleParseArgumentMap
+#define COMPILER_OPTIONS_MAP_KEY_TYPE SimpleParseArgumentMapKey
+#include "compiler_options_map-storage.h"
+
+using Parser = CmdlineParser<SimpleParseArgumentMap, SimpleParseArgumentMapKey>;
+
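+// Typical use (see CompilerOptions::ParseCompilerOptions): build the parser,
+// run Parse() over the raw argument vector, then hand the released argument
+// map to ReadCompilerOptions().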
+static inline Parser CreateSimpleParser(bool ignore_unrecognized) {
+ std::unique_ptr<Parser::Builder> parser_builder =
+ std::unique_ptr<Parser::Builder>(new Parser::Builder());
+
+ AddCompilerOptionsArgumentParserOptions<SimpleParseArgumentMap>(*parser_builder);
+
+ parser_builder->IgnoreUnrecognized(ignore_unrecognized);
+
+ return parser_builder->Build();
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_SIMPLE_COMPILER_OPTIONS_MAP_H_
diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h
index c8a0119..bfefead 100644
--- a/compiler/intrinsics_list.h
+++ b/compiler/intrinsics_list.h
@@ -158,7 +158,12 @@
V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") \
- V(ThreadInterrupted, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/Thread;", "interrupted", "()Z")
+ V(ThreadInterrupted, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/Thread;", "interrupted", "()Z") \
+ V(VarHandleFullFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "fullFence", "()V") \
+ V(VarHandleAcquireFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "acquireFence", "()V") \
+ V(VarHandleReleaseFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "releaseFence", "()V") \
+ V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
+  V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V")
#endif // ART_COMPILER_INTRINSICS_LIST_H_
#undef ART_COMPILER_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 511a44a..5c89869 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -78,21 +78,16 @@
}
}
-// Callers of this method assume it has NO_RETURN.
-NO_RETURN static void Usage(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- std::string error;
- android::base::StringAppendV(&error, fmt, ap);
- LOG(FATAL) << error;
- va_end(ap);
- exit(EXIT_FAILURE);
-}
-
JitCompiler::JitCompiler() {
compiler_options_.reset(new CompilerOptions());
- for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
- compiler_options_->ParseCompilerOption(argument, Usage);
+ {
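+    // Unknown options in the runtime-supplied list are skipped rather than
+    // treated as fatal, hence ignore_unrecognized below.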
+ std::string error_msg;
+ if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(),
+ true /* ignore_unrecognized */,
+ &error_msg)) {
+ LOG(FATAL) << error_msg;
+ UNREACHABLE();
+ }
}
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 347f4ea..28709a1 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -84,7 +84,7 @@
callee_save_regs, mr_conv->EntrySpills());
jni_asm->IncreaseFrameSize(32);
jni_asm->DecreaseFrameSize(32);
- jni_asm->RemoveFrame(frame_size, callee_save_regs);
+ jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
jni_asm->FinalizeCode();
std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 292ce10..3afd701 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -110,23 +110,31 @@
// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
// X20 is safe to use as a scratch register:
- // - with Baker read barriers, it is reserved as Marking Register,
- // and thus does not actually need to be saved/restored; it is
- // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
+ // - with Baker read barriers (in the case of a non-critical native
+ // method), it is reserved as Marking Register, and thus does not
+ // actually need to be saved/restored; it is refreshed on exit
+ // (see Arm64JNIMacroAssembler::RemoveFrame);
// - in other cases, it is saved on entry (in
// Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
- // Arm64JNIMacroAssembler::RemoveFrame).
+ // Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
+ // the case of a critical native method in the Baker read barrier
+ // configuration, where the value of MR must be preserved across
+ // the JNI call (as there is no MR refresh in that case).
return Arm64ManagedRegister::FromXRegister(X20);
}
ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
// X20 is safe to use as a scratch register:
- // - with Baker read barriers, it is reserved as Marking Register,
- // and thus does not actually need to be saved/restored; it is
- // refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
+ // - with Baker read barriers (in the case of a non-critical native
+ // method), it is reserved as Marking Register, and thus does not
+ // actually need to be saved/restored; it is refreshed on exit
+ // (see Arm64JNIMacroAssembler::RemoveFrame);
// - in other cases, it is saved on entry (in
// Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
- // Arm64JNIMacroAssembler::RemoveFrame).
+ // Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
+ // the case of a critical native method in the Baker read barrier
+ // configuration, where the value of MR must be preserved across
+ // the JNI call (as there is no MR refresh in that case).
return Arm64ManagedRegister::FromXRegister(X20);
}
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 36a87a8..42a5f86 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
// Managed runtime calling convention
std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -57,35 +57,37 @@
case kArm:
case kThumb2:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) arm::ArmManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) arm64::Arm64ManagedRuntimeCallingConvention(
+ new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips::MipsManagedRuntimeCallingConvention(
+ new (allocator) mips::MipsManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) mips64::Mips64ManagedRuntimeCallingConvention(
+ new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+ new (allocator) x86::X86ManagedRuntimeCallingConvention(
+ is_static, is_synchronized, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<ManagedRuntimeCallingConvention>(
- new (arena) x86_64::X86_64ManagedRuntimeCallingConvention(
+ new (allocator) x86_64::X86_64ManagedRuntimeCallingConvention(
is_static, is_synchronized, shorty));
#endif
default:
@@ -146,7 +148,7 @@
// JNI calling convention
-std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena,
+std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
@@ -157,50 +159,38 @@
case kArm:
case kThumb2:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm::ArmJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm::ArmJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) arm64::Arm64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) arm64::Arm64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips::MipsJniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips::MipsJniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) mips64::Mips64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) mips64::Mips64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86::X86JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86::X86JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
return std::unique_ptr<JniCallingConvention>(
- new (arena) x86_64::X86_64JniCallingConvention(is_static,
- is_synchronized,
- is_critical_native,
- shorty));
+ new (allocator) x86_64::X86_64JniCallingConvention(
+ is_static, is_synchronized, is_critical_native, shorty));
#endif
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 335a2df..be0bd72 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -231,7 +231,7 @@
// | { Method* } | <-- SP
class ManagedRuntimeCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
const char* shorty,
@@ -284,7 +284,7 @@
// callee saves for frames above this one.
class JniCallingConvention : public CallingConvention {
public:
- static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
+ static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* allocator,
bool is_static,
bool is_synchronized,
bool is_critical_native,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index c66a2a6..92b5c4d 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -66,8 +66,8 @@
template <PointerSize kPointerSize>
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler(
- ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) {
- return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features);
+ ArenaAllocator* allocator, InstructionSet isa, const InstructionSetFeatures* features) {
+ return JNIMacroAssembler<kPointerSize>::Create(allocator, isa, features);
}
enum class JniEntrypoint {
@@ -646,7 +646,10 @@
// 16. Remove activation - need to restore callee save registers since the GC may have changed
// them.
DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
- __ RemoveFrame(frame_size, callee_save_regs);
+ // We expect the compiled method to possibly be suspended during its
+ // execution, except in the case of a CriticalNative method.
+ bool may_suspend = !is_critical_native;
+ __ RemoveFrame(frame_size, callee_save_regs, may_suspend);
DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
// 17. Finalize code generation
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 6adce81..4a0f78c 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -29,7 +29,7 @@
HBasicBlockBuilder(HGraph* graph,
const DexFile* const dex_file,
const DexFile::CodeItem& code_item)
- : arena_(graph->GetArena()),
+ : arena_(graph->GetAllocator()),
graph_(graph),
dex_file_(dex_file),
code_item_(code_item),
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index a7f7bce..0255e73 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -513,18 +513,18 @@
maps_(graph->GetBlocks().size(),
ArenaSafeMap<int, ValueRange*>(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
first_index_bounds_check_map_(
std::less<int>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
early_exit_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
taken_test_loop_(
std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+ graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
+ finite_loop_(graph->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination)),
has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
@@ -668,8 +668,8 @@
if (successor != nullptr) {
bool overflow;
bool underflow;
- ValueRange* new_left_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_left_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound(),
right_range->GetBound().Add(left_compensation, &overflow, &underflow));
if (!overflow && !underflow) {
@@ -677,8 +677,8 @@
new_left_range);
}
- ValueRange* new_right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* new_right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
left_range->GetBound().Add(right_compensation, &overflow, &underflow),
right_range->GetBound());
if (!overflow && !underflow) {
@@ -750,8 +750,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -762,8 +762,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondGT || cond == kCondGE) {
@@ -774,8 +774,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, true_successor, new_range);
}
@@ -785,8 +785,8 @@
if (overflow || underflow) {
return;
}
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
@@ -795,8 +795,8 @@
// length == [c,d] yields [c, d] along true
// length != [c,d] yields [c, d] along false
if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
@@ -804,8 +804,8 @@
// length == 0 yields [1, max] along false
// length != 0 yields [1, max] along true
if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max());
+ ValueRange* new_range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), ValueBound(nullptr, 1), ValueBound::Max());
ApplyRangeFromComparison(
left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
}
@@ -826,7 +826,7 @@
// Non-constant index.
ValueBound lower = ValueBound(nullptr, 0); // constant 0
ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange array_range(GetGraph()->GetArena(), lower, upper);
+ ValueRange array_range(GetGraph()->GetAllocator(), lower, upper);
// Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr && index_range->FitsIn(&array_range)) {
@@ -875,8 +875,8 @@
} else {
ValueBound lower = ValueBound(nullptr, constant + 1);
ValueBound upper = ValueBound::Max();
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
AssignRange(block, array_length, range);
}
}
@@ -938,8 +938,8 @@
ValueRange* range = nullptr;
if (increment == 0) {
// Add constant 0. It's really a fixed value.
- range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(initial_value, 0),
ValueBound(initial_value, 0));
} else {
@@ -959,8 +959,8 @@
bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
}
}
- range = new (GetGraph()->GetArena()) MonotonicValueRange(
- GetGraph()->GetArena(),
+ range = new (GetGraph()->GetAllocator()) MonotonicValueRange(
+ GetGraph()->GetAllocator(),
phi,
initial_value,
increment,
@@ -1039,8 +1039,8 @@
!ValueBound::WouldAddOverflowOrUnderflow(c0, -c1)) {
if ((c0 - c1) <= 0) {
// array.length + (c0 - c1) won't overflow/underflow.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, right_const - upper.GetConstant()),
ValueBound(array_length, right_const - lower.GetConstant()));
AssignRange(sub->GetBlock(), sub, range);
@@ -1087,8 +1087,8 @@
// than array_length.
return;
}
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
ValueBound(left, 0));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1113,8 +1113,8 @@
if (constant > 0) {
// constant serves as a mask so any number masked with it
// gets a [0, constant] value range.
- ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 0),
ValueBound(nullptr, constant));
AssignRange(instruction->GetBlock(), instruction, range);
@@ -1139,8 +1139,8 @@
// array[i % 10]; // index value range [0, 9]
// array[i % -10]; // index value range [0, 9]
// }
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
ValueBound(nullptr, 1 - right_const),
ValueBound(nullptr, right_const - 1));
@@ -1169,8 +1169,8 @@
if (right->IsArrayLength()) {
ValueBound lower = ValueBound::Min(); // ideally, lower should be '1-array_length'.
ValueBound upper = ValueBound(right, -1); // array_length - 1
- ValueRange* right_range = new (GetGraph()->GetArena()) ValueRange(
- GetGraph()->GetArena(),
+ ValueRange* right_range = new (GetGraph()->GetAllocator()) ValueRange(
+ GetGraph()->GetAllocator(),
lower,
upper);
ValueRange* left_range = LookupValueRange(left, instruction->GetBlock());
@@ -1195,8 +1195,8 @@
// which isn't available as an instruction yet. new_array will
// be treated the same as new_array.length when it's used in a ValueBound.
ValueBound upper = ValueBound(new_array, -right_const);
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
+ ValueRange* range = new (GetGraph()->GetAllocator())
+ ValueRange(GetGraph()->GetAllocator(), lower, upper);
ValueRange* existing_range = LookupValueRange(left, new_array->GetBlock());
if (existing_range != nullptr) {
range = existing_range->Narrow(range);
@@ -1260,14 +1260,15 @@
if (base == nullptr) {
DCHECK_GE(min_c, 0);
} else {
- HInstruction* lower = new (GetGraph()->GetArena())
+ HInstruction* lower = new (GetGraph()->GetAllocator())
HAdd(DataType::Type::kInt32, base, GetGraph()->GetIntConstant(min_c));
- upper = new (GetGraph()->GetArena()) HAdd(DataType::Type::kInt32, base, upper);
+ upper = new (GetGraph()->GetAllocator()) HAdd(DataType::Type::kInt32, base, upper);
block->InsertInstructionBefore(lower, bounds_check);
block->InsertInstructionBefore(upper, bounds_check);
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
+ InsertDeoptInBlock(bounds_check, new (GetGraph()->GetAllocator()) HAbove(lower, upper));
}
- InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
+ InsertDeoptInBlock(
+ bounds_check, new (GetGraph()->GetAllocator()) HAboveOrEqual(upper, array_length));
// Flag that this kind of deoptimization has occurred.
has_dom_based_dynamic_bce_ = true;
}
@@ -1291,9 +1292,9 @@
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
// Another bounds check in same or dominated block?
HInstruction* user = use.GetUser();
@@ -1377,7 +1378,7 @@
v2.is_known && (v2.a_constant == 0 || v2.a_constant == 1)) {
DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
- ValueRange index_range(GetGraph()->GetArena(),
+ ValueRange index_range(GetGraph()->GetAllocator(),
ValueBound(v1.instruction, v1.b_constant),
ValueBound(v2.instruction, v2.b_constant));
// If analysis reveals a certain OOB, disable dynamic BCE. Otherwise,
@@ -1410,9 +1411,9 @@
int32_t min_c = base == nullptr ? 0 : value.GetConstant();
int32_t max_c = value.GetConstant();
ArenaVector<HBoundsCheck*> candidates(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
ArenaVector<HBoundsCheck*> standby(
- GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ GetGraph()->GetAllocator()->Adapter(kArenaAllocBoundsCheckElimination));
for (const HUseListNode<HInstruction*>& use : array_length->GetUses()) {
HInstruction* user = use.GetUser();
if (user->IsBoundsCheck() && loop == user->GetBlock()->GetLoopInformation()) {
@@ -1498,7 +1499,8 @@
if (min_c != max_c) {
DCHECK(min_lower == nullptr && min_upper != nullptr &&
max_lower == nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_upper, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_upper, max_upper));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower == nullptr && max_upper != nullptr);
@@ -1508,15 +1510,17 @@
if (min_c != max_c) {
DCHECK(min_lower != nullptr && min_upper != nullptr &&
max_lower != nullptr && max_upper != nullptr);
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(min_lower, max_lower));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(min_lower, max_lower));
} else {
DCHECK(min_lower == nullptr && min_upper == nullptr &&
max_lower != nullptr && max_upper != nullptr);
}
- InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(max_lower, max_upper));
+ InsertDeoptInLoop(
+ loop, block, new (GetGraph()->GetAllocator()) HAbove(max_lower, max_upper));
}
InsertDeoptInLoop(
- loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(max_upper, array_length));
+ loop, block, new (GetGraph()->GetAllocator()) HAboveOrEqual(max_upper, array_length));
} else {
// TODO: if rejected, avoid doing this again for subsequent instructions in this set?
}
@@ -1610,7 +1614,7 @@
TransformLoopForDeoptimizationIfNeeded(loop, needs_taken_test);
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
- new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
+ new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
ReplaceInstruction(check, array);
return true;
@@ -1685,8 +1689,8 @@
block->InsertInstructionBefore(condition, block->GetLastInstruction());
DeoptimizationKind kind =
is_null_check ? DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE;
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, kind, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), condition, kind, suspend->GetDexPc());
block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
if (suspend->HasEnvironment()) {
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
@@ -1698,8 +1702,11 @@
void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
HBasicBlock* block = bounds_check->GetBlock();
block->InsertInstructionBefore(condition, bounds_check);
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, DeoptimizationKind::kBlockBCE, bounds_check->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(),
+ condition,
+ DeoptimizationKind::kBlockBCE,
+ bounds_check->GetDexPc());
block->InsertInstructionBefore(deoptimize, bounds_check);
deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
}
@@ -1763,18 +1770,18 @@
HBasicBlock* false_block = if_block->GetSuccessors()[1]; // False successor.
// Goto instructions.
- true_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- false_block->AddInstruction(new (GetGraph()->GetArena()) HGoto());
- new_preheader->AddInstruction(new (GetGraph()->GetArena()) HGoto());
+ true_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ false_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
+ new_preheader->AddInstruction(new (GetGraph()->GetAllocator()) HGoto());
// Insert the taken-test to see if the loop body is entered. If the
// loop isn't entered at all, it jumps around the deoptimization block.
- if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HGoto()); // placeholder
HInstruction* condition = induction_range_.GenerateTakenTest(
header->GetLastInstruction(), GetGraph(), if_block);
DCHECK(condition != nullptr);
if_block->RemoveInstruction(if_block->GetLastInstruction());
- if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
+ if_block->AddInstruction(new (GetGraph()->GetAllocator()) HIf(condition));
taken_test_loop_.Put(loop_id, true_block);
}
@@ -1853,8 +1860,8 @@
case DataType::Type::kFloat64: zero = graph->GetDoubleConstant(0); break;
default: zero = graph->GetConstant(type, 0); break;
}
- HPhi* phi = new (graph->GetArena())
- HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
+ HPhi* phi = new (graph->GetAllocator())
+ HPhi(graph->GetAllocator(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
phi->SetRawInputAt(0, instruction);
phi->SetRawInputAt(1, zero);
if (type == DataType::Type::kReference) {
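Note on the allocation pattern throughout this file: `new (GetGraph()->GetAllocator()) HFoo(...)` relies on a placement operator new that routes node storage through the graph's arena. A minimal, self-contained sketch of that shape follows; the Arena and Node classes are illustrative stand-ins, not ART's actual declarations.

#include <cstddef>
#include <vector>

// Sketch of an arena with a placement-new hook, loosely modeled on
// the allocation pattern in this diff. Illustrative only.
class Arena {
 public:
  ~Arena() {
    for (void* block : blocks_) ::operator delete(block);
  }
  void* Alloc(size_t size) {
    void* block = ::operator new(size);  // aligned for any ordinary type
    blocks_.push_back(block);
    return block;
  }
 private:
  std::vector<void*> blocks_;
};

// Overloading operator new lets `new (arena) Node(...)` draw its
// storage from Arena::Alloc. Nodes are never deleted individually;
// the arena releases everything at once, so destructors are never
// run for individual nodes.
struct Node {
  explicit Node(int v) : value(v) {}
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete, called only if the constructor throws.
  static void operator delete(void*, Arena*) {}
  int value;
};

int main() {
  Arena arena;
  Node* n = new (&arena) Node(42);  // same shape as new (GetAllocator()) HAdd(...)
  return n->value == 42 ? 0 : 1;
}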
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 851838c..1523478 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -32,10 +32,9 @@
/**
* Fixture class for the BoundsCheckElimination tests.
*/
-class BoundsCheckEliminationTest : public testing::Test {
+class BoundsCheckEliminationTest : public OptimizingUnitTest {
public:
- BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
+ BoundsCheckEliminationTest() : graph_(CreateGraph()) {
graph_->SetHasBoundsChecks(true);
}
@@ -57,8 +56,6 @@
BoundsCheckElimination(graph_, side_effects, &induction).Run();
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -67,12 +64,12 @@
// else if (i >= array.length) { array[i] = 1; // Can't eliminate. }
// else { array[i] = 1; // Can eliminate. }
TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -80,70 +77,70 @@
HInstruction* constant_1 = graph_->GetIntConstant(1);
HInstruction* constant_0 = graph_->GetIntConstant(0);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check2, constant_1, DataType::Type::kInt32, 0);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(bounds_check2);
block2->AddInstruction(array_set);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- cmp = new (&allocator_) HLessThan(parameter2, array_length);
- if_inst = new (&allocator_) HIf(cmp);
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ cmp = new (GetAllocator()) HLessThan(parameter2, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp);
block3->AddInstruction(null_check);
block3->AddInstruction(array_length);
block3->AddInstruction(cmp);
block3->AddInstruction(if_inst);
- HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block4 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block4);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block4->AddInstruction(null_check);
block4->AddInstruction(array_length);
block4->AddInstruction(bounds_check4);
block4->AddInstruction(array_set);
- HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block5 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block5);
- null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block5->AddInstruction(null_check);
block5->AddInstruction(array_length);
block5->AddInstruction(bounds_check5);
block5->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block2->AddSuccessor(exit);
block4->AddSuccessor(exit);
block5->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(block3); // True successor
block1->AddSuccessor(block2); // False successor
@@ -164,12 +161,12 @@
// if (j < array.length) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -178,39 +175,40 @@
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HInstruction* cmp = new (GetAllocator()) HLessThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* add =
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter2, constant_max_int);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp2 = new (GetAllocator()) HGreaterThanOrEqual(add, array_length);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(add);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(add, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -228,12 +226,12 @@
// if (j > 0) array[j] = 1; // Can't eliminate.
// }
TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator_) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference); // array
- HInstruction* parameter2 = new (&allocator_) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -242,41 +240,42 @@
HInstruction* constant_0 = graph_->GetIntConstant(0);
HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block1);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(parameter2, array_length);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
block1->AddInstruction(null_check);
block1->AddInstruction(array_length);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block2 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block2);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
- HInstruction* sub2 = new (&allocator_) HSub(DataType::Type::kInt32, sub1, constant_max_int);
- HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0);
- if_inst = new (&allocator_) HIf(cmp2);
+ HInstruction* sub1 =
+ new (GetAllocator()) HSub(DataType::Type::kInt32, parameter2, constant_max_int);
+ HInstruction* sub2 = new (GetAllocator()) HSub(DataType::Type::kInt32, sub1, constant_max_int);
+ HInstruction* cmp2 = new (GetAllocator()) HLessThanOrEqual(sub2, constant_0);
+ if_inst = new (GetAllocator()) HIf(cmp2);
block2->AddInstruction(sub1);
block2->AddInstruction(sub2);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block3 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator_)
+ HBoundsCheck* bounds_check = new (GetAllocator())
HBoundsCheck(sub2, array_length, 0);
- HArraySet* array_set = new (&allocator_) HArraySet(
+ HArraySet* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check, constant_1, DataType::Type::kInt32, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
@@ -292,10 +291,10 @@
// array[5] = 1; // Can eliminate.
// array[4] = 1; // Can eliminate.
TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -304,49 +303,49 @@
HInstruction* constant_6 = graph_->GetIntConstant(6);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check6 = new (&allocator_)
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check6 = new (GetAllocator())
HBoundsCheck(constant_6, array_length, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check6, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check6);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check5 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check5 = new (GetAllocator())
HBoundsCheck(constant_5, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check5, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check5);
block->AddInstruction(array_set);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check4 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check4 = new (GetAllocator())
HBoundsCheck(constant_4, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check4, constant_1, DataType::Type::kInt32, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check4);
block->AddInstruction(array_set);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
@@ -429,28 +428,28 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) {
// for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) {
// for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) {
// for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), -1, 1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
// for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -458,14 +457,14 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) {
// for (int i=0; i<array.length; i += 2) {
// array[i] = 10; // Can't eliminate due to overflow concern. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 0, 2);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) {
// for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2);
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, GetAllocator(), 1, 2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -546,35 +545,35 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) {
// for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) {
// for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) {
// for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), -1);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) {
// for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -1, kCondLT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) {
// for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2);
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, GetAllocator(), 0, -2);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -653,7 +652,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
// int[] array = new int[10];
// for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -661,7 +660,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
// int[] array = new int[10];
// for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 1, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -669,7 +668,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
// int[] array = new int[10];
// for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 0, 1, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -677,7 +676,7 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) {
// int[] array = new int[10];
// for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE);
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, GetAllocator(), 1, 8, kCondGE);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
@@ -759,21 +758,21 @@
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) {
// for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) {
// for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 1);
RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. }
- HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT);
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, GetAllocator(), 0, kCondGT);
RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -790,10 +789,10 @@
// }
// }
TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
@@ -801,23 +800,23 @@
HInstruction* constant_minus_1 = graph_->GetIntConstant(-1);
HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit);
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
- HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_header);
- HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HAdd* add = new (&allocator_) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
- HIf* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_i = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HAdd* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_length, constant_minus_1);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_i, add);
+ HIf* if_inst = new (GetAllocator()) HIf(cmp);
outer_header->AddPhi(phi_i);
outer_header->AddInstruction(null_check);
outer_header->AddInstruction(array_length);
@@ -826,15 +825,15 @@
outer_header->AddInstruction(if_inst);
phi_i->AddInput(constant_0);
- HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_header = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_header);
- HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HSub* sub = new (&allocator_) HSub(DataType::Type::kInt32, array_length, phi_i);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
- cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
- if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi_j = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HSub* sub = new (GetAllocator()) HSub(DataType::Type::kInt32, array_length, phi_i);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, sub, constant_minus_1);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_j, add);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_header->AddPhi(phi_j);
inner_header->AddInstruction(null_check);
inner_header->AddInstruction(array_length);
@@ -844,25 +843,25 @@
inner_header->AddInstruction(if_inst);
phi_j->AddInput(constant_0);
- HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_compare = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_compare);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArrayGet* array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check1 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArrayGet* array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check1, DataType::Type::kInt32, 0);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
- HInstruction* j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArrayGet* array_get_j_plus_1 = new (&allocator_)
+ HInstruction* j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HBoundsCheck* bounds_check2 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArrayGet* array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check2, DataType::Type::kInt32, 0);
- cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
- if_inst = new (&allocator_) HIf(cmp);
+ cmp = new (GetAllocator()) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
+ if_inst = new (GetAllocator()) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
@@ -871,14 +870,14 @@
inner_body_compare->AddInstruction(cmp);
inner_body_compare->AddInstruction(if_inst);
- HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_swap = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_swap);
- j_plus_1 = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ j_plus_1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
// temp = array[j+1]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- array_get_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check3 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ array_get_j_plus_1 = new (GetAllocator())
HArrayGet(null_check, bounds_check3, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
@@ -886,48 +885,48 @@
inner_body_swap->AddInstruction(bounds_check3);
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- array_get_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check4 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ array_get_j = new (GetAllocator())
HArrayGet(null_check, bounds_check4, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
- HArraySet* array_set_j_plus_1 = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check5 = new (GetAllocator()) HBoundsCheck(j_plus_1, array_length, 0);
+ HArraySet* array_set_j_plus_1 = new (GetAllocator())
HArraySet(null_check, bounds_check5, array_get_j, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check5);
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
- null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
- HArraySet* array_set_j = new (&allocator_)
+ null_check = new (GetAllocator()) HNullCheck(parameter, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HInstruction* bounds_check6 = new (GetAllocator()) HBoundsCheck(phi_j, array_length, 0);
+ HArraySet* array_set_j = new (GetAllocator())
HArraySet(null_check, bounds_check6, array_get_j_plus_1, DataType::Type::kInt32, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check6);
inner_body_swap->AddInstruction(array_set_j);
- inner_body_swap->AddInstruction(new (&allocator_) HGoto());
+ inner_body_swap->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* inner_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(inner_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_j, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_j, constant_1);
inner_body_add->AddInstruction(add);
- inner_body_add->AddInstruction(new (&allocator_) HGoto());
+ inner_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_j->AddInput(add);
- HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* outer_body_add = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(outer_body_add);
- add = new (&allocator_) HAdd(DataType::Type::kInt32, phi_i, constant_1);
+ add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_i, constant_1);
outer_body_add->AddInstruction(add);
- outer_body_add->AddInstruction(new (&allocator_) HGoto());
+ outer_body_add->AddInstruction(new (GetAllocator()) HGoto());
phi_i->AddInput(add);
block->AddSuccessor(outer_header);
@@ -961,10 +960,10 @@
// array[param_i%10] = 10; // Can't eliminate, when param_i < 0
// }
TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* param_i = new (&allocator_)
+ HInstruction* param_i = new (GetAllocator())
HParameterValue(graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(param_i);
@@ -974,17 +973,17 @@
HInstruction* constant_200 = graph_->GetIntConstant(200);
HInstruction* constant_minus_10 = graph_->GetIntConstant(-10);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
block->AddInstruction(new_array);
- block->AddInstruction(new (&allocator_) HGoto());
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header);
graph_->AddBlock(loop_body);
@@ -994,9 +993,9 @@
loop_header->AddSuccessor(loop_body); // false successor
loop_body->AddSuccessor(loop_header);
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi, constant_200);
- HInstruction* if_inst = new (&allocator_) HIf(cmp);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi, constant_200);
+ HInstruction* if_inst = new (GetAllocator()) HIf(cmp);
loop_header->AddPhi(phi);
loop_header->AddInstruction(cmp);
loop_header->AddInstruction(if_inst);
@@ -1005,49 +1004,52 @@
//////////////////////////////////////////////////////////////////////////////////
// LOOP BODY:
// array[i % 10] = 10;
- HRem* i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_10, 0);
- HBoundsCheck* bounds_check_i_mod_10 = new (&allocator_) HBoundsCheck(i_mod_10, constant_10, 0);
- HInstruction* array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_10, 0);
+ HBoundsCheck* bounds_check_i_mod_10 = new (GetAllocator()) HBoundsCheck(i_mod_10, constant_10, 0);
+ HInstruction* array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_10);
loop_body->AddInstruction(bounds_check_i_mod_10);
loop_body->AddInstruction(array_set);
// array[i % 1] = 10;
- HRem* i_mod_1 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0);
- HBoundsCheck* bounds_check_i_mod_1 = new (&allocator_) HBoundsCheck(i_mod_1, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_1 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0);
+ HBoundsCheck* bounds_check_i_mod_1 = new (GetAllocator()) HBoundsCheck(i_mod_1, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_1, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_1);
loop_body->AddInstruction(bounds_check_i_mod_1);
loop_body->AddInstruction(array_set);
// array[i % 200] = 10;
- HRem* i_mod_200 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_1, 0);
- HBoundsCheck* bounds_check_i_mod_200 = new (&allocator_) HBoundsCheck(i_mod_200, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ HRem* i_mod_200 = new (GetAllocator()) HRem(DataType::Type::kInt32, phi, constant_1, 0);
+ HBoundsCheck* bounds_check_i_mod_200 = new (GetAllocator()) HBoundsCheck(
+ i_mod_200, constant_10, 0);
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_200, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_200);
loop_body->AddInstruction(bounds_check_i_mod_200);
loop_body->AddInstruction(array_set);
// array[i % -10] = 10;
- HRem* i_mod_minus_10 = new (&allocator_) HRem(DataType::Type::kInt32, phi, constant_minus_10, 0);
- HBoundsCheck* bounds_check_i_mod_minus_10 = new (&allocator_) HBoundsCheck(
+ HRem* i_mod_minus_10 = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, constant_minus_10, 0);
+ HBoundsCheck* bounds_check_i_mod_minus_10 = new (GetAllocator()) HBoundsCheck(
i_mod_minus_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_i_mod_minus_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(i_mod_minus_10);
loop_body->AddInstruction(bounds_check_i_mod_minus_10);
loop_body->AddInstruction(array_set);
// array[i%array.length] = 10;
- HNullCheck* null_check = new (&allocator_) HNullCheck(new_array, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* i_mod_array_length = new (&allocator_) HRem(DataType::Type::kInt32, phi, array_length, 0);
- HBoundsCheck* bounds_check_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HNullCheck* null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ HArrayLength* array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* i_mod_array_length = new (GetAllocator()) HRem(
+ DataType::Type::kInt32, phi, array_length, 0);
+ HBoundsCheck* bounds_check_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1056,23 +1058,23 @@
loop_body->AddInstruction(array_set);
// array[param_i % 10] = 10;
- HRem* param_i_mod_10 = new (&allocator_) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
- HBoundsCheck* bounds_check_param_i_mod_10 = new (&allocator_) HBoundsCheck(
+ HRem* param_i_mod_10 = new (GetAllocator()) HRem(DataType::Type::kInt32, param_i, constant_10, 0);
+ HBoundsCheck* bounds_check_param_i_mod_10 = new (GetAllocator()) HBoundsCheck(
param_i_mod_10, constant_10, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
new_array, bounds_check_param_i_mod_10, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(param_i_mod_10);
loop_body->AddInstruction(bounds_check_param_i_mod_10);
loop_body->AddInstruction(array_set);
// array[param_i%array.length] = 10;
- null_check = new (&allocator_) HNullCheck(new_array, 0);
- array_length = new (&allocator_) HArrayLength(null_check, 0);
- HRem* param_i_mod_array_length = new (&allocator_) HRem(
+ null_check = new (GetAllocator()) HNullCheck(new_array, 0);
+ array_length = new (GetAllocator()) HArrayLength(null_check, 0);
+ HRem* param_i_mod_array_length = new (GetAllocator()) HRem(
DataType::Type::kInt32, param_i, array_length, 0);
- HBoundsCheck* bounds_check_param_i_mod_array_len = new (&allocator_) HBoundsCheck(
+ HBoundsCheck* bounds_check_param_i_mod_array_len = new (GetAllocator()) HBoundsCheck(
param_i_mod_array_length, array_length, 0);
- array_set = new (&allocator_) HArraySet(
+ array_set = new (GetAllocator()) HArraySet(
null_check, bounds_check_param_i_mod_array_len, constant_10, DataType::Type::kInt32, 0);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
@@ -1081,13 +1083,13 @@
loop_body->AddInstruction(array_set);
// i++;
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, constant_1);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, constant_1);
loop_body->AddInstruction(add);
- loop_body->AddInstruction(new (&allocator_) HGoto());
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
phi->AddInput(add);
//////////////////////////////////////////////////////////////////////////////////
- exit->AddInstruction(new (&allocator_) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
RunBCE();
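The fixture change above (BoundsCheckEliminationTest now derives from OptimizingUnitTest and calls GetAllocator() instead of holding its own pool_/allocator_ pair) follows a common gtest pattern: a shared base class owns the allocation machinery. A rough sketch of that shape, with stand-in types rather than ART's real ones, and assuming gtest_main provides main():

#include <gtest/gtest.h>

// Stand-in for the allocator the base fixture would own.
class FakeAllocator {};

// Base fixture owning the allocation machinery, so derived test
// fixtures no longer carry pool_/allocator_ members themselves.
class OptimizingUnitTestSketch : public ::testing::Test {
 protected:
  FakeAllocator* GetAllocator() { return &allocator_; }

 private:
  FakeAllocator allocator_;
};

class SomePassTest : public OptimizingUnitTestSketch {};

TEST_F(SomePassTest, AllocatorComesFromBase) {
  // Test bodies ask the fixture for the allocator instead of taking
  // the address of a member, mirroring the diff above.
  EXPECT_NE(GetAllocator(), nullptr);
}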
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0e708ed..76350a6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -32,13 +32,12 @@
namespace art {
HGraphBuilder::HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(&graph->GetDexFile()),
@@ -63,7 +62,7 @@
code_generator,
interpreter_metadata,
compiler_stats,
- dex_cache,
+ dex_compilation_unit->GetDexCache(),
handles) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 9524fe2..6c5985a 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -36,13 +36,12 @@
class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(HGraph* graph,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
const uint8_t* interpreter_metadata,
- Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles);
// Only for unit testing.
@@ -89,7 +88,7 @@
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
CompilerDriver* const compiler_driver_;
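The member change just above tightens `DexCompilationUnit* const` to `const DexCompilationUnit* const`: the trailing const fixes the pointer itself, while the leading const additionally makes the pointee read-only through this pointer. A small standalone illustration (Unit is a hypothetical type, not ART's):

struct Unit { int field = 0; };

int main() {
  Unit u;
  Unit* const p1 = &u;        // pointer itself fixed, pointee mutable
  p1->field = 1;              // ok
  const Unit* const p2 = &u;  // pointer fixed and pointee read-only
  // p2->field = 2;           // would not compile
  return p1->field + p2->field;
}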
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index c806dbf..3addaee 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -36,7 +36,7 @@
: HGraphVisitor(graph),
block_has_cha_guard_(GetGraph()->GetBlocks().size(),
0,
- graph->GetArena()->Adapter(kArenaAllocCHA)),
+ graph->GetAllocator()->Adapter(kArenaAllocCHA)),
instruction_iterator_(nullptr) {
number_of_guards_to_visit_ = GetGraph()->GetNumberOfCHAGuards();
DCHECK_NE(number_of_guards_to_visit_, 0u);
@@ -202,8 +202,8 @@
HInstruction* suspend = loop_info->GetSuspendCheck();
// Need a new deoptimize instruction that copies the environment
// of the suspend instruction for the loop.
- HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
+ HDeoptimize* deoptimize = new (GetGraph()->GetAllocator()) HDeoptimize(
+ GetGraph()->GetAllocator(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
suspend->GetEnvironment(), loop_info->GetHeader());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3cb3792..dd8e3d2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -322,7 +322,7 @@
void CodeGenerator::CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnMainOnly);
@@ -420,7 +420,7 @@
bool is_get = field_access->IsUnresolvedInstanceFieldGet()
|| field_access->IsUnresolvedStaticFieldGet();
- ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
@@ -541,7 +541,7 @@
Location runtime_return_location) {
DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
DCHECK_EQ(cls->InputCount(), 1u);
- LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
cls, LocationSummary::kCallOnMainOnly);
locations->SetInAt(0, Location::NoLocation());
locations->AddTemp(runtime_type_index_location);
@@ -617,61 +617,49 @@
const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm::CodeGeneratorARMVIXL(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm::CodeGeneratorARMVIXL(
+ graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) arm64::CodeGeneratorARM64(graph,
- *isa_features.AsArm64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) arm64::CodeGeneratorARM64(
+ graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips::CodeGeneratorMIPS(graph,
- *isa_features.AsMipsInstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips::CodeGeneratorMIPS(
+ graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) mips64::CodeGeneratorMIPS64(graph,
- *isa_features.AsMips64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) mips64::CodeGeneratorMIPS64(
+ graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86::CodeGeneratorX86(graph,
- *isa_features.AsX86InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86::CodeGeneratorX86(
+ graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
return std::unique_ptr<CodeGenerator>(
- new (arena) x86_64::CodeGeneratorX86_64(graph,
- *isa_features.AsX86_64InstructionSetFeatures(),
- compiler_options,
- stats));
+ new (allocator) x86_64::CodeGeneratorX86_64(
+ graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
}
#endif
default:
@@ -712,7 +700,7 @@
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
}
- ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
for (HBasicBlock* block : graph.GetReversePostOrder()) {
if (block->IsLoopHeader()) {
HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
@@ -721,7 +709,8 @@
}
}
}
- ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
+ ArenaVector<size_t> covered(
+ loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
IterationRange<DexInstructionIterator> instructions = code_item.Instructions();
for (auto it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.GetDexPC(instructions.begin());
@@ -909,7 +898,7 @@
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* arena = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
@@ -924,7 +913,7 @@
// The stack mask is not used, so we leave it empty.
ArenaBitVector* stack_mask =
- ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
+ ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
stack_map_stream_.BeginStackMapEntry(dex_pc,
native_pc,
@@ -1194,7 +1183,8 @@
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
@@ -1237,7 +1227,7 @@
Location from2,
Location to2,
DataType::Type type2) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(from1, to1, type1, nullptr);
parallel_move.AddMove(from2, to2, type2, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
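
Note: the two AddMove() calls above are resolved together on purpose. A parallel move treats all sources as read before any destination is written, so a resolver is needed whenever the locations can overlap. A minimal sketch of the cyclic case it guards against, with plain integers standing in for locations:

    // {a -> b, b -> a} cannot be serialized naively; the resolver breaks the
    // cycle with a temporary (conceptually a swap).
    void EmitSwap(int& a, int& b) {
      int tmp = a;  // save one source before it is clobbered
      a = b;
      b = tmp;
    }
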
@@ -1400,7 +1390,7 @@
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations = new (allocator) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);
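
Note: every `new (allocator) T(...)` in this file is a placement new into the graph's arena; objects are released wholesale with the arena rather than deleted individually. A hedged sketch of the idiom with a hypothetical, much simplified bump allocator (illustration only, not ART's actual ArenaAllocator):

    #include <cstddef>
    #include <new>

    // Hypothetical fixed-capacity bump allocator.
    class Arena {
     public:
      void* Alloc(size_t bytes) {
        bytes = (bytes + 7u) & ~size_t{7};  // keep 8-byte alignment
        if (used_ + bytes > sizeof(storage_)) throw std::bad_alloc();
        void* p = storage_ + used_;
        used_ += bytes;
        return p;
      }
     private:
      alignas(8) unsigned char storage_[1 << 16];
      size_t used_ = 0;
    };

    // Enables `new (&arena) Node(...)`.
    void* operator new(size_t bytes, Arena* arena) { return arena->Alloc(bytes); }
    void operator delete(void*, Arena*) noexcept {}  // used only if a ctor throws

    struct Node { int value; explicit Node(int v) : value(v) {} };
    // Usage: Node* n = new (&arena) Node(42);  // no per-object delete; arena owns it
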
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index ac3c839..2c3cf26 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -605,26 +605,26 @@
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
allocated_registers_(RegisterSet::Empty()),
- blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
- kArenaAllocCodeGenerator)),
- blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
- kArenaAllocCodeGenerator)),
+ blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
+ kArenaAllocCodeGenerator)),
+ blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
+ kArenaAllocCodeGenerator)),
number_of_core_registers_(number_of_core_registers),
number_of_fpu_registers_(number_of_fpu_registers),
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
- stack_map_stream_(graph->GetArena(), graph->GetInstructionSet()),
+ stack_map_stream_(graph->GetAllocator(), graph->GetInstructionSet()),
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_roots_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
compiler_options_(compiler_options),
- slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ slow_paths_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
current_slow_path_(nullptr),
current_block_index_(0),
is_leaf_(true),
@@ -668,8 +668,8 @@
// We use raw array allocations instead of ArenaVector<> because Labels are
// non-constructible and non-movable and as such cannot be held in a vector.
size_t size = GetGraph()->GetBlocks().size();
- LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
- kArenaAllocCodeGenerator);
+ LabelType* labels =
+ GetGraph()->GetAllocator()->AllocArray<LabelType>(size, kArenaAllocCodeGenerator);
for (size_t i = 0; i != size; ++i) {
new(labels + i) LabelType();
}
@@ -823,7 +823,8 @@
SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
: graph_(graph),
codegen_(codegen),
- slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}
+ slow_path_map_(std::less<uint32_t>(),
+ graph->GetAllocator()->Adapter(kArenaAllocSlowPaths)) {}
// Creates and adds a new slow-path, if needed, or returns existing one otherwise.
// Templating the method (rather than the whole class) on the slow-path type enables
@@ -857,10 +858,11 @@
}
} else {
// First time this dex-pc is seen.
- iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
+ iter = slow_path_map_.Put(dex_pc,
+ {{}, {graph_->GetAllocator()->Adapter(kArenaAllocSlowPaths)}});
}
// Cannot share: create and add new slow-path for this particular dex-pc.
- SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
+ SlowPathCodeType* slow_path = new (graph_->GetAllocator()) SlowPathCodeType(instruction);
iter->second.emplace_back(std::make_pair(instruction, slow_path));
codegen_->AddSlowPath(slow_path);
return slow_path;
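
Note on the label-allocation hunk above: labels go into a raw array with per-element placement new precisely because, as the comment says, the type cannot be held in a vector. A standalone sketch of the same loop, with `NonMovable` standing in for vixl's Label and `raw` assumed suitably sized and aligned:

    #include <cstddef>
    #include <new>

    struct NonMovable {
      NonMovable() = default;
      NonMovable(const NonMovable&) = delete;
      NonMovable& operator=(const NonMovable&) = delete;
    };

    NonMovable* ConstructAll(void* raw, size_t n) {
      NonMovable* items = static_cast<NonMovable*>(raw);
      for (size_t i = 0; i != n; ++i) {
        new (items + i) NonMovable();  // construct in place, no copies or moves
      }
      return items;
    }
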
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 42e9f68..9be9117 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -620,7 +620,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1294,7 +1294,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
type,
@@ -1453,28 +1453,28 @@
callee_saved_fp_registers.GetList(),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -1929,17 +1929,18 @@
const MemOperand& src) {
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
__ Ldrb(Register(dst), src);
break;
case DataType::Type::kInt8:
__ Ldrsb(Register(dst), src);
break;
- case DataType::Type::kInt16:
- __ Ldrsh(Register(dst), src);
- break;
case DataType::Type::kUint16:
__ Ldrh(Register(dst), src);
break;
+ case DataType::Type::kInt16:
+ __ Ldrsh(Register(dst), src);
+ break;
case DataType::Type::kInt32:
case DataType::Type::kReference:
case DataType::Type::kInt64:
@@ -1972,14 +1973,7 @@
MemOperand base = MemOperand(temp_base);
switch (type) {
case DataType::Type::kBool:
- {
- ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
- __ ldarb(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
- break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
{
ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -1988,17 +1982,11 @@
MaybeRecordImplicitNullCheck(instruction);
}
}
- __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte);
- break;
- case DataType::Type::kUint16:
- {
- ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
- __ ldarh(Register(dst), base);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
+ if (type == DataType::Type::kInt8) {
+ __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte);
}
break;
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
{
ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -2007,7 +1995,9 @@
MaybeRecordImplicitNullCheck(instruction);
}
}
- __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte);
+ if (type == DataType::Type::kInt16) {
+ __ Sbfx(Register(dst), Register(dst), 0, DataType::Size(type) * kBitsPerByte);
+ }
break;
case DataType::Type::kInt32:
case DataType::Type::kReference:
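
Note on the acquire-load hunk above: kBool, kUint8 and kInt8 now share a single ldarb, with the Sbfx sign-extension applied only when the type is kInt8 (kUint16/kInt16 get the same treatment via ldarh). A scalar sketch of the equivalence, using a standard acquire load in place of ldarb:

    #include <atomic>
    #include <cstdint>

    // One unsigned acquire byte load covers bool, uint8 and int8; only the
    // signed case needs the extra sign-extension (the conditional Sbfx).
    int32_t LoadByteAcquire(const std::atomic<uint8_t>* addr, bool is_signed) {
      uint8_t raw = addr->load(std::memory_order_acquire);  // ldarb equivalent
      return is_signed ? static_cast<int32_t>(static_cast<int8_t>(raw))
                       : static_cast<int32_t>(raw);
    }
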
@@ -2048,6 +2038,7 @@
const MemOperand& dst) {
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
__ Strb(Register(src), dst);
break;
@@ -2087,6 +2078,7 @@
// Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
{
ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -2212,7 +2204,7 @@
SuspendCheckSlowPathARM64* slow_path =
down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARM64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -2243,36 +2235,9 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- /* No unimplemented IR. */
-
-#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
-
-enum UnimplementedInstructionBreakCode {
- // Using a base helps identify when we hit such breakpoints.
- UnimplementedInstructionBreakCodeBaseCode = 0x900,
-#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
-#undef ENUM_UNIMPLEMENTED_INSTRUCTION
-};
-
-#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
- __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
- } \
- void LocationsBuilderARM64::Visit##name(H##name* instr) { \
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
- locations->SetOut(Location::Any()); \
- }
- FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
-#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
-
-#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
-#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
-
void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
DCHECK_EQ(instr->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2301,10 +2266,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2386,7 +2351,7 @@
void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (IsConstantZeroBitPattern(instruction->InputAt(1))) {
locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
@@ -2493,7 +2458,7 @@
void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2564,7 +2529,7 @@
void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) {
DCHECK(DataType::IsIntegralType(instr->GetType())) << instr->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
locations->SetInAt(0, Location::RequiresRegister());
// There is no immediate variant of negated bitwise instructions in AArch64.
locations->SetInAt(1, Location::RequiresRegister());
@@ -2596,7 +2561,7 @@
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
if (instruction->GetInstrKind() == HInstruction::kNeg) {
locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant()));
} else {
@@ -2667,7 +2632,7 @@
void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2681,7 +2646,7 @@
void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIndex* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
HIntConstant* shift = instruction->GetShift()->AsIntConstant();
@@ -2713,7 +2678,7 @@
void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
if (instr->GetOpKind() == HInstruction::kSub &&
accumulator->IsConstant() &&
@@ -2767,10 +2732,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -2937,7 +2902,7 @@
}
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2961,7 +2926,7 @@
DataType::Type value_type = instruction->GetComponentType();
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -3047,7 +3012,7 @@
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl::aarch64::Label non_zero;
@@ -3162,7 +3127,7 @@
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
BoundsCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
@@ -3170,7 +3135,7 @@
void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3179,7 +3144,7 @@
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
@@ -3218,13 +3183,14 @@
void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
DataType::Type in_type = compare->InputAt(0)->GetType();
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3255,9 +3221,10 @@
// -1 if: left < right
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
Register result = OutputRegister(compare);
@@ -3282,7 +3249,7 @@
}
void LocationsBuilderARM64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
if (DataType::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -3488,7 +3455,7 @@
void LocationsBuilderARM64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3534,7 +3501,7 @@
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3560,7 +3527,7 @@
void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3578,7 +3545,7 @@
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3732,7 +3699,7 @@
}
void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3753,7 +3720,7 @@
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3774,7 +3741,7 @@
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3796,7 +3763,7 @@
}
void LocationsBuilderARM64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
@@ -3865,7 +3832,7 @@
}
void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3934,7 +3901,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4089,8 +4057,8 @@
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -4121,8 +4089,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -4167,7 +4135,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
// Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64.
@@ -4209,8 +4178,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARM64* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARM64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl::aarch64::Label done;
@@ -4378,7 +4347,7 @@
}
void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4387,7 +4356,7 @@
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4478,7 +4447,7 @@
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4491,7 +4460,7 @@
// art::PrepareForRegisterAllocation.
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
- IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena(), codegen_);
+ IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetAllocator(), codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
}
@@ -4902,7 +4871,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5043,7 +5012,7 @@
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARM64(
cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5064,7 +5033,7 @@
void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5073,7 +5042,7 @@
}
void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5100,7 +5069,7 @@
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(load->GetType()));
@@ -5183,7 +5152,7 @@
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -5216,7 +5185,7 @@
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5225,8 +5194,8 @@
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5245,7 +5214,7 @@
void LocationsBuilderARM64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5285,7 +5254,7 @@
void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -5322,8 +5291,8 @@
}
void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(LocationFrom(x0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5341,8 +5310,8 @@
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
@@ -5378,7 +5347,7 @@
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5396,7 +5365,7 @@
}
void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5424,7 +5393,8 @@
}
void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ SlowPathCodeARM64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARM64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5454,7 +5424,7 @@
}
void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5471,7 +5441,7 @@
void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kArtMethodRegister));
}
@@ -5481,7 +5451,7 @@
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5497,7 +5467,7 @@
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -5569,7 +5539,7 @@
}
void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type return_type = instruction->InputAt(0)->GetType();
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
@@ -5703,8 +5673,8 @@
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5728,8 +5698,8 @@
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -5741,10 +5711,11 @@
void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type input_type = conversion->GetInputType();
DataType::Type result_type = conversion->GetResultType();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
(result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
@@ -5767,7 +5738,8 @@
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) {
int result_size = DataType::Size(result_type);
@@ -5784,11 +5756,9 @@
// 32bit input value as a 64bit value assuming that the top 32 bits are
// zero.
__ Mov(output.W(), source.W());
- } else if (result_type == DataType::Type::kUint16 ||
- (input_type == DataType::Type::kUint16 && input_size < result_size)) {
- __ Ubfx(output,
- output.IsX() ? source.X() : source.W(),
- 0, DataType::Size(DataType::Type::kUint16) * kBitsPerByte);
+ } else if (DataType::IsUnsignedType(result_type) ||
+ (DataType::IsUnsignedType(input_type) && input_size < result_size)) {
+ __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, result_size * kBitsPerByte);
} else {
__ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
}
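
Note on the Ubfx/Sbfx hunk above: the generalized condition zero-extends whenever the result type is unsigned, or the input is unsigned and being widened; everything else sign-extends from the narrower width. Scalar equivalents of the two bitfield extracts (assuming 0 < bits < 64 and an arithmetic right shift, as on the targets here):

    #include <cstdint>

    uint64_t ZeroExtendLow(uint64_t x, int bits) {   // Ubfx(x, 0, bits)
      return x & ((uint64_t{1} << bits) - 1);
    }

    int64_t SignExtendLow(uint64_t x, int bits) {    // Sbfx(x, 0, bits)
      int shift = 64 - bits;
      return static_cast<int64_t>(x << shift) >> shift;
    }
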
@@ -5835,7 +5805,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
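
Note: "cascaded compare/jumps" above means the packed switch lowers to one equality test per case rather than a jump table. Illustrative control flow only (hypothetical function, not ART code):

    // Packed switch over cases [lo, lo + n): one compare-and-branch per case.
    int Dispatch(int value, int lo) {
      if (value == lo)     return 0;  // case lo
      if (value == lo + 1) return 1;  // case lo + 1
      // ... one test per remaining packed case ...
      return -1;                      // default
    }
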
@@ -6059,7 +6029,7 @@
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -6318,7 +6288,7 @@
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
instruction,
ref,
obj,
@@ -6376,7 +6346,7 @@
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
instruction,
ref,
obj,
@@ -6503,7 +6473,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6539,7 +6509,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -6548,7 +6518,7 @@
void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
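
Note: nearly every slow path allocated in this file follows the same entry/exit-label shape: branch out of line on the rare condition and resume at the exit. A toy scalar model of that control flow (hypothetical, not ART's actual emission):

    #include <cstdint>

    int32_t LoadWithNullCheck(const int32_t* ptr) {
      int32_t result;
      if (ptr == nullptr) goto slow_path_entry;  // __ Cbz(..., GetEntryLabel())
      result = *ptr;                             // fast path
    slow_path_exit:                              // __ Bind(GetExitLabel())
      return result;
    slow_path_entry:
      result = -1;                               // stand-in for the runtime call
      goto slow_path_exit;
    }
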
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 21da955..e53773c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -489,7 +489,7 @@
uint32_t GetPreferredSlotsAlignment() const OVERRIDE { return vixl::aarch64::kXRegSizeInBytes; }
JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARM64(switch_instr));
return jump_tables_.back().get();
}
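
Note: CreateJumpTable above hands back a raw pointer while jump_tables_ retains ownership, so the handle stays valid for later emission. A generic sketch of that pattern (heap unique_ptr used purely for illustration; the real object is placed in the graph's arena):

    #include <memory>
    #include <vector>

    struct JumpTable { /* emitted later */ };

    class TableCache {
     public:
      JumpTable* Create() {
        tables_.emplace_back(new JumpTable());
        return tables_.back().get();  // stable handle; the vector owns the object
      }
     private:
      std::vector<std::unique_ptr<JumpTable>> tables_;
    };
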
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 2b9e0fe..d7137a3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -282,6 +282,58 @@
return stack_offset;
}
+static LoadOperandType GetLoadOperandType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kReference:
+ return kLoadWord;
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ return kLoadUnsignedByte;
+ case DataType::Type::kInt8:
+ return kLoadSignedByte;
+ case DataType::Type::kUint16:
+ return kLoadUnsignedHalfword;
+ case DataType::Type::kInt16:
+ return kLoadSignedHalfword;
+ case DataType::Type::kInt32:
+ return kLoadWord;
+ case DataType::Type::kInt64:
+ return kLoadWordPair;
+ case DataType::Type::kFloat32:
+ return kLoadSWord;
+ case DataType::Type::kFloat64:
+ return kLoadDWord;
+ default:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+}
+
+static StoreOperandType GetStoreOperandType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kReference:
+ return kStoreWord;
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ return kStoreByte;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ return kStoreHalfword;
+ case DataType::Type::kInt32:
+ return kStoreWord;
+ case DataType::Type::kInt64:
+ return kStoreWordPair;
+ case DataType::Type::kFloat32:
+ return kStoreSWord;
+ case DataType::Type::kFloat64:
+ return kStoreDWord;
+ default:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+}
+
void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
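
Note the asymmetry the two new helpers above encode: a load must pick a sign- or zero-extending variant per type, while a store depends only on width, since truncation is the same either way. A standalone analogue:

    #include <cstdint>

    // One store serves int8 and uint8 alike, but loads must choose an extension.
    void     StoreByte(uint8_t* p, uint32_t v)  { *p = static_cast<uint8_t>(v); }
    int32_t  LoadSignedByte(const uint8_t* p)   { return static_cast<int8_t>(*p); }
    uint32_t LoadUnsignedByte(const uint8_t* p) { return *p; }
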
@@ -711,7 +763,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
LocationFrom(calling_convention.GetRegisterAt(0)),
@@ -1362,7 +1414,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
LocationFrom(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -2369,26 +2421,26 @@
ComputeSRegisterListMask(kFpuCalleeSaves),
compiler_options,
stats),
- block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ block_labels_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- baker_read_barrier_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2598,12 +2650,13 @@
Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Type type) {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kReference: {
+ case DataType::Type::kInt32: {
uint32_t index = gp_index_++;
uint32_t stack_index = stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
@@ -2674,12 +2727,13 @@
Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::Type type) const {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kReference: {
+ case DataType::Type::kInt32: {
return LocationFrom(r0);
}
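
Note on the two calling-convention hunks above: references are now classified with the 32-bit integral types, so they take the next core register when one remains and spill to the stack otherwise; the stack index advances even for register arguments, keeping the slots reserved. A simplified sketch of that assignment (the result encoding here is hypothetical; the real code returns Location objects and also handles 64-bit pairs and FP values):

    #include <cstdint>

    struct ArgAssigner {
      uint32_t gp_index = 0;
      uint32_t stack_index = 0;
      static constexpr uint32_t kNumGpRegisters = 4;  // e.g. r0-r3

      // Returns a register number, or kNumGpRegisters + slot for stack args.
      uint32_t Next32BitLocation() {
        uint32_t index = gp_index++;
        uint32_t slot = stack_index++;  // slot reserved even for register args
        return (index < kNumGpRegisters) ? index : kNumGpRegisters + slot;
      }
    };
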
@@ -2756,7 +2810,7 @@
void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
// TODO(VIXL): Maybe refactor to have the 'move' implementation here and use it in
// `ParallelMoveResolverARMVIXL::EmitMove`, as is done in the `arm64` backend.
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
move.AddMove(src, dst, dst_type, nullptr);
GetMoveResolver()->EmitNativeCode(&move);
}
@@ -2976,7 +3030,7 @@
}
void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -2993,7 +3047,7 @@
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -3014,7 +3068,7 @@
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -3027,7 +3081,7 @@
}
void LocationsBuilderARMVIXL::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
const bool is_floating_point = DataType::IsFloatingPointType(select->GetType());
if (is_floating_point) {
@@ -3168,7 +3222,7 @@
}
void LocationsBuilderARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorARMVIXL::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -3258,7 +3312,7 @@
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -3417,7 +3471,7 @@
void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3427,7 +3481,7 @@
void LocationsBuilderARMVIXL::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3437,7 +3491,7 @@
void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3447,7 +3501,7 @@
void LocationsBuilderARMVIXL::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3458,7 +3512,7 @@
void LocationsBuilderARMVIXL::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3494,7 +3548,7 @@
void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
@@ -3668,7 +3722,7 @@
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3728,7 +3782,8 @@
void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
// The float-to-long, double-to-long and long-to-float type conversions
// rely on a call to the runtime.
@@ -3739,69 +3794,32 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
-
- // The Java language does not allow treating boolean as an integral type but
- // our bit representation makes it safe.
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to byte is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
+ DCHECK(DataType::IsIntegralType(input_type)) << input_type;
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -3816,18 +3834,16 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kFloat32: {
- // Processing a Dex `float-to-long' instruction.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(LocationFrom(r0, r1));
@@ -3835,7 +3851,6 @@
}
case DataType::Type::kFloat64: {
- // Processing a Dex `double-to-long' instruction.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0),
calling_convention.GetFpuRegisterAt(1)));
@@ -3849,41 +3864,19 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to char is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-float' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64: {
- // Processing a Dex `long-to-float' instruction.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0),
calling_convention.GetRegisterAt(1)));
@@ -3892,7 +3885,6 @@
}
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -3906,18 +3898,16 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-double' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-double' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -3925,7 +3915,6 @@
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -3948,22 +3937,55 @@
Location in = locations->InAt(0);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
switch (result_type) {
- case DataType::Type::kInt8:
+ case DataType::Type::kUint8:
switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to byte is a result of code transformations.
- __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8);
- break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8);
+ break;
+ case DataType::Type::kInt64:
+ __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case DataType::Type::kInt8:
+ switch (input_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
__ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8);
break;
+ case DataType::Type::kInt64:
+ __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case DataType::Type::kUint16:
+ switch (input_type) {
+ case DataType::Type::kInt8:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
+ break;
+ case DataType::Type::kInt64:
+ __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16);
+ break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
@@ -3973,18 +3995,13 @@
case DataType::Type::kInt16:
switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16);
- break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
+ case DataType::Type::kInt32:
__ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
break;
+ case DataType::Type::kInt64:
+ __ Sbfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16);
+ break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
@@ -3995,7 +4012,6 @@
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
DCHECK(out.IsRegister());
if (in.IsRegisterPair()) {
__ Mov(OutputRegister(conversion), LowRegisterFrom(in));
@@ -4013,7 +4029,6 @@
break;
case DataType::Type::kFloat32: {
- // Processing a Dex `float-to-int' instruction.
vixl32::SRegister temp = LowSRegisterFrom(locations->GetTemp(0));
__ Vcvt(S32, F32, temp, InputSRegisterAt(conversion, 0));
__ Vmov(OutputRegister(conversion), temp);
@@ -4021,7 +4036,6 @@
}
case DataType::Type::kFloat64: {
- // Processing a Dex `double-to-int' instruction.
vixl32::SRegister temp_s = LowSRegisterFrom(locations->GetTemp(0));
__ Vcvt(S32, F64, temp_s, DRegisterFrom(in));
__ Vmov(OutputRegister(conversion), temp_s);
@@ -4037,12 +4051,11 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
__ Mov(LowRegisterFrom(out), InputRegisterAt(conversion, 0));
@@ -4051,13 +4064,11 @@
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-long' instruction.
codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-long' instruction.
codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickD2l, int64_t, double>();
break;
@@ -4068,49 +4079,24 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to char is a result of code transformations.
- __ Ubfx(OutputRegister(conversion), LowRegisterFrom(in), 0, 16);
- break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.
- __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16: {
- // Processing a Dex `int-to-float' instruction.
__ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0));
__ Vcvt(F32, S32, OutputSRegister(conversion), OutputSRegister(conversion));
break;
- }
case DataType::Type::kInt64:
- // Processing a Dex `long-to-float' instruction.
codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickL2f, float, int64_t>();
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
__ Vcvt(F32, F64, OutputSRegister(conversion), DRegisterFrom(in));
break;
@@ -4123,19 +4109,16 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16: {
- // Processing a Dex `int-to-double' instruction.
__ Vmov(LowSRegisterFrom(out), InputRegisterAt(conversion, 0));
__ Vcvt(F64, S32, DRegisterFrom(out), LowSRegisterFrom(out));
break;
- }
case DataType::Type::kInt64: {
- // Processing a Dex `long-to-double' instruction.
vixl32::Register low = LowRegisterFrom(in);
vixl32::Register high = HighRegisterFrom(in);
vixl32::SRegister out_s = LowSRegisterFrom(out);
@@ -4158,7 +4141,6 @@
}
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
__ Vcvt(F64, F32, DRegisterFrom(out), InputSRegisterAt(conversion, 0));
break;
@@ -4176,7 +4158,7 @@
void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4241,7 +4223,7 @@
void LocationsBuilderARMVIXL::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4303,7 +4285,7 @@
void LocationsBuilderARMVIXL::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -4512,7 +4494,7 @@
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -4625,7 +4607,7 @@
call_kind = LocationSummary::kNoCall;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -4752,7 +4734,7 @@
void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
DivZeroCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4760,6 +4742,7 @@
switch (instruction->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -4889,7 +4872,7 @@
void LocationsBuilderARMVIXL::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4935,7 +4918,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32: {
@@ -5165,8 +5148,8 @@
}
void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
if (instruction->IsStringAlloc()) {
locations->AddTemp(LocationFrom(kMethodRegister));
} else {
@@ -5199,8 +5182,8 @@
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetOut(LocationFrom(r0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -5220,7 +5203,7 @@
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -5237,7 +5220,7 @@
void LocationsBuilderARMVIXL::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(LocationFrom(kMethodRegister));
}
@@ -5248,7 +5231,7 @@
void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5274,7 +5257,7 @@
void LocationsBuilderARMVIXL::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -5285,12 +5268,13 @@
void LocationsBuilderARMVIXL::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -5323,9 +5307,10 @@
vixl32::Condition less_cond = vixl32::Condition(kNone);
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
// Emit move to `out` before the `Cmp`, as `Mov` might affect the status flags.
__ Mov(out, 0);
@@ -5374,7 +5359,7 @@
void LocationsBuilderARMVIXL::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -5452,7 +5437,7 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
DataType::Type field_type = field_info.GetFieldType();
@@ -5513,18 +5498,16 @@
switch (field_type) {
case DataType::Type::kBool:
- case DataType::Type::kInt8: {
- GetAssembler()->StoreToOffset(kStoreByte, RegisterFrom(value), base, offset);
- break;
- }
-
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
- GetAssembler()->StoreToOffset(kStoreHalfword, RegisterFrom(value), base, offset);
+ case DataType::Type::kInt32: {
+ StoreOperandType operand_type = GetStoreOperandType(field_type);
+ GetAssembler()->StoreToOffset(operand_type, RegisterFrom(value), base, offset);
break;
}
- case DataType::Type::kInt32:
case DataType::Type::kReference: {
if (kPoisonHeapReferences && needs_write_barrier) {
// Note that in the case where `value` is a null reference,
@@ -5617,10 +5600,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5764,24 +5747,15 @@
switch (field_type) {
case DataType::Type::kBool:
- GetAssembler()->LoadFromOffset(kLoadUnsignedByte, RegisterFrom(out), base, offset);
- break;
-
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- GetAssembler()->LoadFromOffset(kLoadSignedByte, RegisterFrom(out), base, offset);
- break;
-
- case DataType::Type::kInt16:
- GetAssembler()->LoadFromOffset(kLoadSignedHalfword, RegisterFrom(out), base, offset);
- break;
-
case DataType::Type::kUint16:
- GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, RegisterFrom(out), base, offset);
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32: {
+ LoadOperandType operand_type = GetLoadOperandType(field_type);
+ GetAssembler()->LoadFromOffset(operand_type, RegisterFrom(out), base, offset);
break;
-
- case DataType::Type::kInt32:
- GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
- break;
+ }
case DataType::Type::kReference: {
// /* HeapReference<Object> */ out = *(base + offset)
@@ -5986,7 +5960,7 @@
void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* instruction) {
NullCheckSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) NullCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathARMVIXL(instruction);
AddSlowPath(slow_path);
__ CompareAndBranchIfZero(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
}
@@ -5995,56 +5969,6 @@
codegen_->GenerateNullCheck(instruction);
}
-static LoadOperandType GetLoadOperandType(DataType::Type type) {
- switch (type) {
- case DataType::Type::kReference:
- return kLoadWord;
- case DataType::Type::kBool:
- return kLoadUnsignedByte;
- case DataType::Type::kInt8:
- return kLoadSignedByte;
- case DataType::Type::kUint16:
- return kLoadUnsignedHalfword;
- case DataType::Type::kInt16:
- return kLoadSignedHalfword;
- case DataType::Type::kInt32:
- return kLoadWord;
- case DataType::Type::kInt64:
- return kLoadWordPair;
- case DataType::Type::kFloat32:
- return kLoadSWord;
- case DataType::Type::kFloat64:
- return kLoadDWord;
- default:
- LOG(FATAL) << "Unreachable type " << type;
- UNREACHABLE();
- }
-}
-
-static StoreOperandType GetStoreOperandType(DataType::Type type) {
- switch (type) {
- case DataType::Type::kReference:
- return kStoreWord;
- case DataType::Type::kBool:
- case DataType::Type::kInt8:
- return kStoreByte;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return kStoreHalfword;
- case DataType::Type::kInt32:
- return kStoreWord;
- case DataType::Type::kInt64:
- return kStoreWordPair;
- case DataType::Type::kFloat32:
- return kStoreSWord;
- case DataType::Type::kFloat64:
- return kStoreDWord;
- default:
- LOG(FATAL) << "Unreachable type " << type;
- UNREACHABLE();
- }
-}
-
void CodeGeneratorARMVIXL::LoadFromShiftedRegOffset(DataType::Type type,
Location out_loc,
vixl32::Register base,
@@ -6054,18 +5978,19 @@
MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
switch (type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
__ Ldrsb(cond, RegisterFrom(out_loc), mem_address);
break;
case DataType::Type::kBool:
__ Ldrb(cond, RegisterFrom(out_loc), mem_address);
break;
- case DataType::Type::kInt16:
- __ Ldrsh(cond, RegisterFrom(out_loc), mem_address);
- break;
case DataType::Type::kUint16:
__ Ldrh(cond, RegisterFrom(out_loc), mem_address);
break;
+ case DataType::Type::kInt16:
+ __ Ldrsh(cond, RegisterFrom(out_loc), mem_address);
+ break;
case DataType::Type::kReference:
case DataType::Type::kInt32:
__ Ldr(cond, RegisterFrom(out_loc), mem_address);
@@ -6089,12 +6014,13 @@
MemOperand mem_address(base, reg_index, vixl32::LSL, shift_count);
switch (type) {
- case DataType::Type::kInt8:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
__ Strb(cond, RegisterFrom(loc), mem_address);
break;
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
__ Strh(cond, RegisterFrom(loc), mem_address);
break;
case DataType::Type::kReference:
@@ -6115,10 +6041,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6182,9 +6108,10 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
vixl32::Register length;
if (maybe_compressed_char_at) {
@@ -6398,7 +6325,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -6434,9 +6361,10 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
if (index.IsConstant()) {
int32_t const_index = Int32ConstantFrom(index);
@@ -6505,7 +6433,7 @@
SlowPathCodeARMVIXL* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARMVIXL(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
vixl32::Label non_zero;
@@ -6679,7 +6607,7 @@
void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6703,7 +6631,7 @@
void LocationsBuilderARMVIXL::VisitIntermediateAddress(HIntermediateAddress* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->GetOffset()));
@@ -6766,7 +6694,7 @@
int32_t index = Int32ConstantFrom(index_loc);
if (index < 0 || index >= length) {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
} else {
@@ -6777,13 +6705,13 @@
}
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(index_loc), length);
codegen_->AddSlowPath(slow_path);
__ B(hs, slow_path->GetEntryLabel());
} else {
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathARMVIXL(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathARMVIXL(instruction);
__ Cmp(RegisterFrom(length_loc), InputOperandAt(instruction, 0));
codegen_->AddSlowPath(slow_path);
__ B(ls, slow_path->GetEntryLabel());
@@ -6817,8 +6745,8 @@
}
void LocationsBuilderARMVIXL::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6842,7 +6770,8 @@
SuspendCheckSlowPathARMVIXL* slow_path =
down_cast<SuspendCheckSlowPathARMVIXL*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARMVIXL(instruction, successor);
+ slow_path =
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathARMVIXL(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -7157,7 +7086,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7280,7 +7209,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(
+ LoadClassSlowPathARMVIXL* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7297,7 +7226,7 @@
void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -7307,10 +7236,10 @@
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (GetGraph()->GetAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ /* do_clinit */ true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7351,7 +7280,7 @@
void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(LocationFrom(r0));
@@ -7420,7 +7349,7 @@
codegen_->EmitMovwMovtPlaceholder(labels, temp);
GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathARMVIXL(load);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7454,7 +7383,7 @@
void LocationsBuilderARMVIXL::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7465,7 +7394,7 @@
void LocationsBuilderARMVIXL::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorARMVIXL::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7476,8 +7405,8 @@
}
void LocationsBuilderARMVIXL::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -7529,7 +7458,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7746,8 +7676,8 @@
kWithoutReadBarrier);
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7775,8 +7705,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7813,7 +7743,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -7853,8 +7784,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeARMVIXL* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARMVIXL(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathARMVIXL(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
vixl32::Label done;
@@ -8029,8 +7960,8 @@
}
void LocationsBuilderARMVIXL::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
}
@@ -8061,7 +7992,7 @@
void LocationsBuilderARMVIXL::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
// Note: GVN reorders commutative operations to have the constant on the right hand side.
@@ -8084,7 +8015,7 @@
void LocationsBuilderARMVIXL::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
@@ -8151,7 +8082,7 @@
DCHECK(instruction->GetType() == DataType::Type::kInt32 ||
instruction->GetType() == DataType::Type::kInt64);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
const bool overlap = instruction->GetType() == DataType::Type::kInt64 &&
HDataProcWithShifterOp::IsExtensionOp(instruction->GetOpKind());
@@ -8515,7 +8446,7 @@
// Slow path marking the GC root `root`. The entrypoint will
// be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
codegen_->AddSlowPath(slow_path);
// /* GcRoot<mirror::Object> */ root = *(obj + offset)
@@ -8764,7 +8695,7 @@
// Slow path marking the object `ref` when the GC is marking. The
// entrypoint will be loaded by the slow path code.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
+ new (GetGraph()->GetAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
AddSlowPath(slow_path);
@@ -8810,8 +8741,8 @@
// Slow path updating the object reference at address `obj + field_offset`
// when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
+ LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL(
instruction,
ref,
obj,
@@ -8922,7 +8853,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARMVIXL* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathARMVIXL(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -8958,7 +8889,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeARMVIXL* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathARMVIXL(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -9263,7 +9194,7 @@
void LocationsBuilderARMVIXL::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instr, LocationSummary::kNoCall);
locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
Location::RequiresRegister());
locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
@@ -9300,7 +9231,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderARMVIXL::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
codegen_->GetAssembler()->GetVIXLAssembler()->IsUsingT32()) {
@@ -9414,7 +9345,7 @@
TODO_VIXL32(FATAL);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -9422,7 +9353,7 @@
void LocationsBuilderARMVIXL::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
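The conversion hunks above replace the per-case byte/char handling with unified kUint8/kUint16 lowering built on ARM bitfield extracts. As a rough C++ model of what those instructions compute (illustrative only, not part of the patch; for kInt64 inputs the same extract is applied to the low word via LowRegisterFrom(in)):

    #include <cstdint>

    // Ubfx(out, in, 0, w) zero-extends the low w bits of `in`.
    int32_t IntToUint8(int32_t in)  { return in & 0xff; }                 // Ubfx ..., 0, 8
    int32_t IntToUint16(int32_t in) { return in & 0xffff; }               // Ubfx ..., 0, 16

    // Sbfx(out, in, 0, w) sign-extends the low w bits of `in`.
    int32_t IntToInt8(int32_t in)   { return static_cast<int8_t>(in); }   // Sbfx ..., 0, 8
    int32_t IntToInt16(int32_t in)  { return static_cast<int16_t>(in); }  // Sbfx ..., 0, 16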
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 58b8525..c46d17c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -119,7 +119,7 @@
explicit JumpTableARMVIXL(HPackedSwitch* switch_instr)
: switch_instr_(switch_instr),
table_start_(),
- bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ bb_addresses_(switch_instr->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
for (uint32_t i = 0; i < num_entries; i++) {
VIXLInt32Literal *lit = new VIXLInt32Literal(0, vixl32::RawLiteral::kManuallyPlaced);
@@ -739,7 +739,7 @@
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
JumpTableARMVIXL* CreateJumpTable(HPackedSwitch* switch_instr) {
- jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARMVIXL(switch_instr));
+ jump_tables_.emplace_back(new (GetGraph()->GetAllocator()) JumpTableARMVIXL(switch_instr));
return jump_tables_.back().get();
}
void EmitJumpTables();
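Both files follow the same mechanical rename: every GetArena() call becomes GetAllocator(), with placement new continuing to put codegen objects into the compiler's arena. A minimal sketch of the idiom, assuming a simplified shape rather than ART's actual declarations:

    // Hypothetical, simplified: the graph owns one ArenaAllocator and hands it out.
    ArenaAllocator* allocator = graph->GetAllocator();

    // Placement new allocates the LocationSummary inside the arena, so it is
    // released wholesale when the compilation arena is reset; no per-object
    // delete is needed.
    LocationSummary* locations =
        new (allocator) LocationSummary(instruction, LocationSummary::kNoCall);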
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index a7c8557..7ea7b9c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -51,12 +51,13 @@
Location MipsReturnLocation(DataType::Type return_type) {
switch (return_type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
return Location::RegisterLocation(V0);
case DataType::Type::kInt64:
@@ -84,12 +85,13 @@
Location next_location;
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kReference: {
+ case DataType::Type::kInt32: {
uint32_t gp_index = gp_index_++;
if (gp_index < calling_convention.GetNumberOfRegisters()) {
next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
@@ -555,7 +557,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -966,7 +968,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1098,19 +1100,19 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -1996,7 +1998,7 @@
void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -2021,7 +2023,7 @@
void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32: {
@@ -2287,7 +2289,7 @@
void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2540,10 +2542,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2592,7 +2594,8 @@
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
instruction->IsStringCharAt();
switch (type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
Register out = out_loc.AsRegister<Register>();
if (index.IsConstant()) {
size_t offset =
@@ -2618,19 +2621,6 @@
break;
}
- case DataType::Type::kInt16: {
- Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
- } else {
- __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
- __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
- }
- break;
- }
-
case DataType::Type::kUint16: {
Register out = out_loc.AsRegister<Register>();
if (maybe_compressed_char_at) {
@@ -2675,6 +2665,9 @@
__ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
__ Bind(&done);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index_reg, obj);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
} else {
__ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
__ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
@@ -2683,6 +2676,22 @@
break;
}
+ case DataType::Type::kInt16: {
+ Register out = out_loc.AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index.AsRegister<Register>(), obj);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
+ } else {
+ __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
+ }
+ break;
+ }
+
case DataType::Type::kInt32: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
Register out = out_loc.AsRegister<Register>();
@@ -2690,6 +2699,9 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index.AsRegister<Register>(), obj);
+ __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
} else {
__ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
__ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
@@ -2763,6 +2775,9 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index.AsRegister<Register>(), obj);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
} else {
__ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
__ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
@@ -2776,6 +2791,9 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadSFromOffset(out, obj, offset, null_checker);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index.AsRegister<Register>(), obj);
+ __ LoadSFromOffset(out, TMP, data_offset, null_checker);
} else {
__ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
__ LoadSFromOffset(out, TMP, data_offset, null_checker);
@@ -2789,6 +2807,9 @@
size_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
__ LoadDFromOffset(out, obj, offset, null_checker);
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(TMP, index.AsRegister<Register>(), obj);
+ __ LoadDFromOffset(out, TMP, data_offset, null_checker);
} else {
__ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
__ LoadDFromOffset(out, TMP, data_offset, null_checker);
@@ -2803,7 +2824,7 @@
}
void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2847,7 +2868,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2880,6 +2901,7 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
if (index.IsConstant()) {
@@ -2897,11 +2919,13 @@
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
}
@@ -2919,6 +2943,8 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
@@ -2960,7 +2986,7 @@
SlowPathCodeMIPS* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
MipsLabel non_zero;
@@ -2968,6 +2994,8 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
@@ -3051,6 +3079,8 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
}
@@ -3068,6 +3098,8 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
}
@@ -3085,6 +3117,8 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
+ } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
+ __ Addu(base_reg, index.AsRegister<Register>(), obj);
} else {
__ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
}
@@ -3104,6 +3138,26 @@
}
}
+void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex(
+ HIntermediateArrayAddressIndex* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
+
+ HIntConstant* shift = instruction->GetShift()->AsIntConstant();
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(shift));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntermediateArrayAddressIndex(
+ HIntermediateArrayAddressIndex* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register index_reg = locations->InAt(0).AsRegister<Register>();
+ uint32_t shift = instruction->GetShift()->AsIntConstant()->GetValue();
+ __ Sll(locations->Out().AsRegister<Register>(), index_reg, shift);
+}
+
void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
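[Note, not part of the patch: the VisitIntermediateArrayAddressIndex hunk above is what feeds the new `else if` branches in the array-access hunks. The intermediate node emits the shift once, presumably so the shifted index can be shared by several accesses of the same array, and each access then only needs the add. A minimal sketch of the two lowerings, using the mnemonics from the patch and placeholder register names:

    // One-step form: one instruction computes base = obj + (index << TIMES_4).
    __ ShiftAndAdd(base_reg, index_reg, obj, TIMES_4, base_reg);

    // Two-step form with HIntermediateArrayAddressIndex:
    __ Sll(shifted_index, index_reg, TIMES_4);  // emitted once, by the intermediate node
    __ Addu(base_reg, shifted_index, obj);      // emitted per array access

Register names here are illustrative only.]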
@@ -3117,7 +3171,7 @@
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Register index = locations->InAt(0).AsRegister<Register>();
@@ -3168,7 +3222,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -3208,8 +3263,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -3363,7 +3418,7 @@
void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3372,7 +3427,7 @@
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -3386,13 +3441,14 @@
DataType::Type in_type = compare->InputAt(0)->GetType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -3429,9 +3485,10 @@
// -1 if: left < right
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
Register lhs = locations->InAt(0).AsRegister<Register>();
Register rhs = locations->InAt(1).AsRegister<Register>();
@@ -3545,7 +3602,7 @@
}
void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3759,7 +3816,7 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -3826,13 +3883,15 @@
}
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path =
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
DataType::Type type = instruction->GetType();
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -3872,7 +3931,7 @@
void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3889,7 +3948,7 @@
void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5469,7 +5528,7 @@
}
void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -5486,7 +5545,7 @@
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -6041,7 +6100,7 @@
}
void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6054,7 +6113,7 @@
}
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
}
@@ -6079,7 +6138,7 @@
}
void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -6096,7 +6155,7 @@
bool generate_volatile = field_info.IsVolatile() && is_wide;
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
generate_volatile
? LocationSummary::kCallOnMainOnly
@@ -6159,17 +6218,18 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
load_type = kLoadUnsignedByte;
break;
case DataType::Type::kInt8:
load_type = kLoadSignedByte;
break;
- case DataType::Type::kInt16:
- load_type = kLoadSignedHalfword;
- break;
case DataType::Type::kUint16:
load_type = kLoadUnsignedHalfword;
break;
+ case DataType::Type::kInt16:
+ load_type = kLoadSignedHalfword;
+ break;
case DataType::Type::kInt32:
case DataType::Type::kFloat32:
case DataType::Type::kReference:
@@ -6269,7 +6329,7 @@
DataType::Type field_type = field_info.GetFieldType();
bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
bool generate_volatile = field_info.IsVolatile() && is_wide;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6312,11 +6372,12 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
store_type = kStoreByte;
break;
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
store_type = kStoreHalfword;
break;
case DataType::Type::kInt32:
@@ -6632,7 +6693,7 @@
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(
instruction,
root,
/*entrypoint*/ temp);
@@ -6959,14 +7020,14 @@
// to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
}
AddSlowPath(slow_path);
@@ -7002,7 +7063,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7038,7 +7099,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
@@ -7065,7 +7126,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7207,8 +7269,8 @@
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7236,8 +7298,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7252,7 +7314,7 @@
}
void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7261,7 +7323,7 @@
}
void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7602,7 +7664,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -7780,7 +7842,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -7800,7 +7862,7 @@
void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7810,7 +7872,7 @@
}
void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -7819,7 +7881,7 @@
void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
@@ -7945,7 +8007,7 @@
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7982,7 +8044,7 @@
}
void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -7991,8 +8053,8 @@
}
void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8009,7 +8071,7 @@
void LocationsBuilderMIPS::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8104,7 +8166,7 @@
void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -8162,8 +8224,8 @@
}
void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8181,8 +8243,8 @@
}
void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -8211,7 +8273,7 @@
}
void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8244,7 +8306,7 @@
}
void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -8272,7 +8334,7 @@
}
void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -8301,7 +8363,7 @@
}
void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -8318,7 +8380,7 @@
void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -8328,7 +8390,7 @@
}
void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -8344,7 +8406,7 @@
LocationSummary::CallKind call_kind = (type == DataType::Type::kInt32)
? LocationSummary::kNoCall
: LocationSummary::kCallOnMainOnly;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -8422,7 +8484,7 @@
}
void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, MipsReturnLocation(return_type));
}
@@ -8563,8 +8625,8 @@
}
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -8587,8 +8649,8 @@
}
void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -8601,7 +8663,8 @@
void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
DataType::Type input_type = conversion->GetInputType();
DataType::Type result_type = conversion->GetResultType();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
@@ -8616,7 +8679,8 @@
call_kind = LocationSummary::kCallOnMainOnly;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
if (call_kind == LocationSummary::kNoCall) {
if (DataType::IsFloatingPointType(input_type)) {
@@ -8652,7 +8716,8 @@
bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
if (result_type == DataType::Type::kInt64 && DataType::IsIntegralType(input_type)) {
Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
@@ -8670,8 +8735,8 @@
: locations->InAt(0).AsRegister<Register>();
switch (result_type) {
- case DataType::Type::kUint16:
- __ Andi(dst, src, 0xFFFF);
+ case DataType::Type::kUint8:
+ __ Andi(dst, src, 0xFF);
break;
case DataType::Type::kInt8:
if (has_sign_extension) {
@@ -8681,6 +8746,9 @@
__ Sra(dst, dst, 24);
}
break;
+ case DataType::Type::kUint16:
+ __ Andi(dst, src, 0xFFFF);
+ break;
case DataType::Type::kInt16:
if (has_sign_extension) {
__ Seh(dst, src);
@@ -8950,7 +9018,7 @@
void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -9059,7 +9127,7 @@
void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer (HMipsComputeBaseMethodAddress).
locations->SetInAt(1, Location::RequiresRegister());
@@ -9088,7 +9156,7 @@
void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
HMipsComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -9121,7 +9189,7 @@
void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
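[Note, not part of the patch: the bulk of the hunks above, and of the mips64 file that follows, are one mechanical substitution. HGraph's ArenaAllocator is now reached through GetAllocator() instead of GetArena(); only the accessor name changes, as the constructor hunks (move_resolver_, assembler_, the patch tables) show the same Adapter() usage as before. A representative before/after, in the pattern of the null-check hunk above:

    // Old accessor name:
    SlowPathCodeMIPS* slow_path =
        new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);

    // New accessor name, same placement-new into the graph's ArenaAllocator:
    SlowPathCodeMIPS* slow_path =
        new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS(instruction);
]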
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7051cce..fad0fe7 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -50,6 +50,7 @@
Location Mips64ReturnLocation(DataType::Type return_type) {
switch (return_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -511,7 +512,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -909,7 +910,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1040,23 +1041,23 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena(), &isa_features),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator(), &isa_features),
isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -1834,7 +1835,7 @@
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
+ new (GetGraph()->GetAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
codegen_->AddSlowPath(slow_path);
__ LoadFromOffset(kLoadUnsignedHalfword,
@@ -1859,7 +1860,7 @@
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
DCHECK_EQ(instruction->InputCount(), 2U);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DataType::Type type = instruction->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -1989,7 +1990,7 @@
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
DataType::Type type = instr->GetResultType();
switch (type) {
case DataType::Type::kInt32:
@@ -2118,10 +2119,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2170,7 +2171,8 @@
const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
instruction->IsStringCharAt();
switch (type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (index.IsConstant()) {
size_t offset =
@@ -2196,19 +2198,6 @@
break;
}
- case DataType::Type::kInt16: {
- GpuRegister out = out_loc.AsRegister<GpuRegister>();
- if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
- } else {
- __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_2);
- __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
- }
- break;
- }
-
case DataType::Type::kUint16: {
GpuRegister out = out_loc.AsRegister<GpuRegister>();
if (maybe_compressed_char_at) {
@@ -2261,6 +2250,19 @@
break;
}
+ case DataType::Type::kInt16: {
+ GpuRegister out = out_loc.AsRegister<GpuRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
+ } else {
+ __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_2);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
+ }
+ break;
+ }
+
case DataType::Type::kInt32: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -2383,7 +2385,7 @@
}
void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -2427,7 +2429,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -2460,6 +2462,7 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
if (index.IsConstant()) {
@@ -2477,8 +2480,8 @@
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
if (index.IsConstant()) {
data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
@@ -2540,7 +2543,7 @@
SlowPathCodeMIPS64* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathMIPS64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
Mips64Label non_zero;
@@ -2697,7 +2700,7 @@
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
BoundsCheckSlowPathMIPS64* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -2748,7 +2751,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
@@ -2788,8 +2792,8 @@
!instruction->CanThrowIntoCatchBlock();
}
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(slow_path);
// Avoid this check if we know `obj` is not null.
@@ -2943,7 +2947,7 @@
void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -2952,7 +2956,7 @@
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
check->GetLoadClass(),
check,
check->GetDexPc(),
@@ -2965,13 +2969,14 @@
void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
DataType::Type in_type = compare->InputAt(0)->GetType();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(compare);
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64:
locations->SetInAt(0, Location::RequiresRegister());
@@ -3001,9 +3006,10 @@
// -1 if: left < right
switch (in_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
@@ -3083,7 +3089,7 @@
}
void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
case DataType::Type::kInt64:
@@ -3371,7 +3377,7 @@
void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -3424,7 +3430,7 @@
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
@@ -3450,7 +3456,7 @@
void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -3467,7 +3473,7 @@
void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -4250,7 +4256,7 @@
}
void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -4267,7 +4273,7 @@
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -4589,7 +4595,7 @@
}
void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -4602,7 +4608,7 @@
}
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
CanMoveConditionally(select, locations);
}
@@ -4622,7 +4628,7 @@
}
void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -4638,7 +4644,7 @@
DataType::Type field_type = field_info.GetFieldType();
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
object_field_get_with_read_barrier
? LocationSummary::kCallOnSlowPath
@@ -4681,17 +4687,18 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
load_type = kLoadUnsignedByte;
break;
case DataType::Type::kInt8:
load_type = kLoadSignedByte;
break;
- case DataType::Type::kInt16:
- load_type = kLoadSignedHalfword;
- break;
case DataType::Type::kUint16:
load_type = kLoadUnsignedHalfword;
break;
+ case DataType::Type::kInt16:
+ load_type = kLoadSignedHalfword;
+ break;
case DataType::Type::kInt32:
case DataType::Type::kFloat32:
load_type = kLoadWord;
@@ -4755,7 +4762,7 @@
void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
const FieldInfo& field_info ATTRIBUTE_UNUSED) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
@@ -4779,11 +4786,12 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
store_type = kStoreByte;
break;
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
store_type = kStoreHalfword;
break;
case DataType::Type::kInt32:
@@ -5043,7 +5051,7 @@
// Slow path marking the GC root `root`.
Location temp = Location::RegisterLocation(T9);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
+ new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(
instruction,
root,
/*entrypoint*/ temp);
@@ -5328,14 +5336,14 @@
// above are expected to be null in this code path.
DCHECK_EQ(offset, 0u);
DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
- slow_path = new (GetGraph()->GetArena())
+ slow_path = new (GetGraph()->GetAllocator())
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
/* field_offset */ index,
temp_reg);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
}
AddSlowPath(slow_path);
@@ -5371,7 +5379,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -5407,7 +5415,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
@@ -5434,7 +5442,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5576,8 +5585,8 @@
maybe_temp_loc,
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls, slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5605,8 +5614,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathMIPS64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -5621,7 +5630,7 @@
}
void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5630,7 +5639,7 @@
}
void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -5945,7 +5954,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6074,7 +6083,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
+ SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathMIPS64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6094,7 +6103,7 @@
void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6104,7 +6113,7 @@
}
void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6114,7 +6123,7 @@
void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
HLoadString::LoadKind load_kind = load->GetLoadKind();
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6192,7 +6201,7 @@
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load, info_high);
+ new (GetGraph()->GetAllocator()) LoadStringSlowPathMIPS64(load, info_high);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -6220,7 +6229,7 @@
}
void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -6229,8 +6238,8 @@
}
void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6248,7 +6257,7 @@
void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6303,7 +6312,7 @@
void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -6353,8 +6362,8 @@
}
void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6372,8 +6381,8 @@
}
void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -6403,7 +6412,7 @@
}
void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6427,7 +6436,7 @@
}
void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -6455,7 +6464,8 @@
}
void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
+ SlowPathCodeMIPS64* slow_path =
+ new (GetGraph()->GetAllocator()) NullCheckSlowPathMIPS64(instruction);
AddSlowPath(slow_path);
Location obj = instruction->GetLocations()->InAt(0);
@@ -6484,7 +6494,7 @@
}
void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -6501,7 +6511,7 @@
void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -6511,7 +6521,7 @@
}
void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -6527,7 +6537,7 @@
LocationSummary::CallKind call_kind =
DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32:
@@ -6595,7 +6605,7 @@
}
void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
DataType::Type return_type = ret->InputAt(0)->GetType();
locations->SetInAt(0, Mips64ReturnLocation(return_type));
}
@@ -6729,8 +6739,8 @@
}
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -6753,8 +6763,8 @@
}
void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6767,14 +6777,15 @@
void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
DataType::Type input_type = conversion->GetInputType();
DataType::Type result_type = conversion->GetResultType();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
(result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) {
LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(conversion);
if (DataType::IsFloatingPointType(input_type)) {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -6794,15 +6805,16 @@
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(input_type, result_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) {
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
switch (result_type) {
- case DataType::Type::kUint16:
- __ Andi(dst, src, 0xFFFF);
+ case DataType::Type::kUint8:
+ __ Andi(dst, src, 0xFF);
break;
case DataType::Type::kInt8:
if (input_type == DataType::Type::kInt64) {
@@ -6815,6 +6827,9 @@
__ Seb(dst, src);
}
break;
+ case DataType::Type::kUint16:
+ __ Andi(dst, src, 0xFFFF);
+ break;
case DataType::Type::kInt16:
if (input_type == DataType::Type::kInt64) {
// Type conversion from long to types narrower than int is a result of code
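In scalar terms, the integral narrowings in the hunk above compute the following (a sketch with hypothetical helper names, mirroring the Andi/Seb instructions visible in the diff; Seh for kInt16 is assumed by analogy with Seb):

    #include <cassert>
    #include <cstdint>
    int64_t ToUint8(int64_t src)  { return src & 0xFF; }                 // Andi dst, src, 0xFF
    int64_t ToUint16(int64_t src) { return src & 0xFFFF; }               // Andi dst, src, 0xFFFF
    int64_t ToInt8(int64_t src)   { return static_cast<int8_t>(src); }   // Seb: sign-extend byte
    int64_t ToInt16(int64_t src)  { return static_cast<int16_t>(src); }  // Seh (assumed)
    int main() {
      assert(ToUint8(0x1FF) == 0xFF);  // zero-extension drops the high bits
      assert(ToInt8(0xFF) == -1);      // sign-extension replicates bit 7
    }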
@@ -7002,7 +7017,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
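The "cascaded compare/jumps" strategy named in the comment above, in scalar miniature (a sketch of the lowering idea, not the MIPS64 emitter itself; all names are hypothetical):

    // Each case lowers to one compare-and-branch against lower_bound + i;
    // control falls through to the default target when nothing matches.
    int DispatchPackedSwitch(int value, int lower_bound, const int* targets,
                             int num_entries, int default_target) {
      for (int i = 0; i < num_entries; ++i) {
        if (value == lower_bound + i) return targets[i];
      }
      return default_target;
    }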
@@ -7098,7 +7113,7 @@
void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
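Throughout this file, and the vector backends that follow, new (GetGraph()->GetArena()) becomes new (GetGraph()->GetAllocator()), with helper parameters renamed from arena to allocator to match. The idiom itself is placement new on an arena; a minimal self-contained sketch, assuming a simplified allocator rather than ART's real ArenaAllocator:

    #include <cstddef>
    #include <deque>
    #include <vector>
    class ArenaAllocator {
     public:
      void* Alloc(std::size_t bytes) {
        blocks_.emplace_back(bytes);   // freed wholesale when the arena dies,
        return blocks_.back().data();  // never object by object
      }
     private:
      std::deque<std::vector<char>> blocks_;
    };
    // Placement form so that `new (allocator) T(...)` draws from the arena.
    void* operator new(std::size_t size, ArenaAllocator* allocator) {
      return allocator->Alloc(size);
    }
    // Usage (hypothetical type): auto* summary = new (allocator) MyNode();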
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 5d5623b..10aced0 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -38,10 +38,11 @@
#define __ GetVIXLAssembler()->
void LocationsBuilderARM64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -73,6 +74,7 @@
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
if (src_loc.IsConstant()) {
@@ -129,9 +131,10 @@
}
void LocationsBuilderARM64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -176,8 +179,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -185,6 +188,7 @@
instruction->IsVecNot() ? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -202,7 +206,7 @@
}
void LocationsBuilderARM64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
@@ -242,7 +246,7 @@
}
void LocationsBuilderARM64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecCnv(HVecCnv* instruction) {
@@ -260,7 +264,7 @@
}
void LocationsBuilderARM64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNeg(HVecNeg* instruction) {
@@ -268,6 +272,7 @@
VRegister src = VRegisterFrom(locations->InAt(0));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Neg(dst.V16B(), src.V16B());
@@ -300,7 +305,7 @@
}
void LocationsBuilderARM64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAbs(HVecAbs* instruction) {
@@ -312,7 +317,6 @@
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Abs(dst.V16B(), src.V16B());
break;
- case DataType::Type::kUint16:
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Abs(dst.V8H(), src.V8H());
@@ -340,7 +344,7 @@
}
void LocationsBuilderARM64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecNot(HVecNot* instruction) {
@@ -353,6 +357,7 @@
__ Movi(dst.V16B(), 1);
__ Eor(dst.V16B(), dst.V16B(), src.V16B());
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -367,10 +372,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -389,7 +395,7 @@
}
void LocationsBuilderARM64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAdd(HVecAdd* instruction) {
@@ -398,6 +404,7 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Add(dst.V16B(), lhs.V16B(), rhs.V16B());
@@ -430,7 +437,7 @@
}
void LocationsBuilderARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -439,30 +446,29 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Urhadd(dst.V16B(), lhs.V16B(), rhs.V16B())
+ : __ Uhadd(dst.V16B(), lhs.V16B(), rhs.V16B());
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Urhadd(dst.V16B(), lhs.V16B(), rhs.V16B())
- : __ Uhadd(dst.V16B(), lhs.V16B(), rhs.V16B());
- } else {
- instruction->IsRounded()
- ? __ Srhadd(dst.V16B(), lhs.V16B(), rhs.V16B())
- : __ Shadd(dst.V16B(), lhs.V16B(), rhs.V16B());
- }
+ instruction->IsRounded()
+ ? __ Srhadd(dst.V16B(), lhs.V16B(), rhs.V16B())
+ : __ Shadd(dst.V16B(), lhs.V16B(), rhs.V16B());
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Urhadd(dst.V8H(), lhs.V8H(), rhs.V8H())
+ : __ Uhadd(dst.V8H(), lhs.V8H(), rhs.V8H());
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Urhadd(dst.V8H(), lhs.V8H(), rhs.V8H())
- : __ Uhadd(dst.V8H(), lhs.V8H(), rhs.V8H());
- } else {
- instruction->IsRounded()
- ? __ Srhadd(dst.V8H(), lhs.V8H(), rhs.V8H())
- : __ Shadd(dst.V8H(), lhs.V8H(), rhs.V8H());
- }
+ instruction->IsRounded()
+ ? __ Srhadd(dst.V8H(), lhs.V8H(), rhs.V8H())
+ : __ Shadd(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
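The hunk above replaces the IsUnsigned() branch with dedicated kUint8/kUint16 cases that select the unsigned instruction directly. Per lane, the four flavours compute the following in scalar terms (a sketch under the usual AArch64 definitions; helper names are hypothetical):

    #include <cstdint>
    uint8_t UHadd(uint8_t a, uint8_t b)  { return (uint16_t{a} + b) >> 1; }      // truncating
    uint8_t URhadd(uint8_t a, uint8_t b) { return (uint16_t{a} + b + 1) >> 1; }  // rounding
    int8_t  SHadd(int8_t a, int8_t b)    { return (int16_t{a} + b) >> 1; }
    int8_t  SRhadd(int8_t a, int8_t b)   { return (int16_t{a} + b + 1) >> 1; }
    // e.g. UHadd(255, 254) == 254 while URhadd(255, 254) == 255: the wider
    // intermediate means the +1 rounding never overflows the lane.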
@@ -471,7 +477,7 @@
}
void LocationsBuilderARM64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecSub(HVecSub* instruction) {
@@ -480,6 +486,7 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Sub(dst.V16B(), lhs.V16B(), rhs.V16B());
@@ -512,7 +519,7 @@
}
void LocationsBuilderARM64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMul(HVecMul* instruction) {
@@ -521,6 +528,7 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Mul(dst.V16B(), lhs.V16B(), rhs.V16B());
@@ -549,7 +557,7 @@
}
void LocationsBuilderARM64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecDiv(HVecDiv* instruction) {
@@ -573,7 +581,7 @@
}
void LocationsBuilderARM64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
@@ -582,22 +590,21 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Umin(dst.V16B(), lhs.V16B(), rhs.V16B());
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V16B(), lhs.V16B(), rhs.V16B());
- } else {
- __ Smin(dst.V16B(), lhs.V16B(), rhs.V16B());
- }
+ __ Smin(dst.V16B(), lhs.V16B(), rhs.V16B());
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Umin(dst.V8H(), lhs.V8H(), rhs.V8H());
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V8H(), lhs.V8H(), rhs.V8H());
- } else {
- __ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
- }
+ __ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -624,7 +631,7 @@
}
void LocationsBuilderARM64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
@@ -633,22 +640,21 @@
VRegister rhs = VRegisterFrom(locations->InAt(1));
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Umax(dst.V16B(), lhs.V16B(), rhs.V16B());
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V16B(), lhs.V16B(), rhs.V16B());
- } else {
- __ Smax(dst.V16B(), lhs.V16B(), rhs.V16B());
- }
+ __ Smax(dst.V16B(), lhs.V16B(), rhs.V16B());
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Umax(dst.V8H(), lhs.V8H(), rhs.V8H());
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V8H(), lhs.V8H(), rhs.V8H());
- } else {
- __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
- }
+ __ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
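Same pattern for Min and Max above: the packed type alone now picks Umin/Umax versus Smin/Smax, with no runtime flag. A worked example of why the distinction matters even on identical bit patterns:

    #include <cassert>
    #include <cstdint>
    int main() {
      uint8_t ua = 0x80, ub = 0x01;          // 128 and 1 as unsigned bytes
      int8_t  sa = int8_t(0x80), sb = 0x01;  // the same bits read as -128 and 1
      assert((ua < ub ? ua : ub) == 0x01);   // Umin lane result: 1
      assert((sa < sb ? sa : sb) == -128);   // Smin lane result: -128 (bits 0x80)
    }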
@@ -675,7 +681,8 @@
}
void LocationsBuilderARM64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ // TODO: Allow constants supported by BIC (vector, immediate).
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecAnd(HVecAnd* instruction) {
@@ -685,6 +692,7 @@
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -705,11 +713,12 @@
}
void InstructionCodeGeneratorARM64::VisitVecAndNot(HVecAndNot* instruction) {
+ // TODO: Use BIC (vector, register).
LOG(FATAL) << "Unsupported SIMD instruction " << instruction->GetId();
}
void LocationsBuilderARM64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecOr(HVecOr* instruction) {
@@ -719,6 +728,7 @@
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -735,7 +745,7 @@
}
void LocationsBuilderARM64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecXor(HVecXor* instruction) {
@@ -745,6 +755,7 @@
VRegister dst = VRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -761,9 +772,10 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -780,7 +792,7 @@
}
void LocationsBuilderARM64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShl(HVecShl* instruction) {
@@ -789,6 +801,7 @@
VRegister dst = VRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Shl(dst.V16B(), lhs.V16B(), value);
@@ -813,7 +826,7 @@
}
void LocationsBuilderARM64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecShr(HVecShr* instruction) {
@@ -822,6 +835,7 @@
VRegister dst = VRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Sshr(dst.V16B(), lhs.V16B(), value);
@@ -846,7 +860,7 @@
}
void LocationsBuilderARM64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARM64::VisitVecUShr(HVecUShr* instruction) {
@@ -855,6 +869,7 @@
VRegister dst = VRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Ushr(dst.V16B(), lhs.V16B(), value);
@@ -879,7 +894,7 @@
}
void LocationsBuilderARM64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -888,6 +903,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -926,6 +942,7 @@
// Set required elements.
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ Mov(dst.V16B(), 0, InputRegisterAt(instruction, 0));
@@ -950,9 +967,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -970,7 +988,7 @@
}
void LocationsBuilderARM64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
// Some early revisions of the Cortex-A53 have an erratum (835769) whereby it is possible for a
@@ -985,6 +1003,7 @@
DCHECK(locations->InAt(0).Equals(locations->Out()));
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
if (instruction->GetOpKind() == HInstruction::kAdd) {
@@ -1017,13 +1036,14 @@
}
void LocationsBuilderARM64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
// Some conversions require temporary registers.
LocationSummary* locations = instruction->GetLocations();
HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
DCHECK_EQ(a->GetPackedType(), b->GetPackedType());
switch (a->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
switch (instruction->GetPackedType()) {
case DataType::Type::kInt64:
@@ -1069,10 +1089,10 @@
HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
DCHECK_EQ(a->GetPackedType(), b->GetPackedType());
switch (a->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, a->GetVectorLength());
switch (instruction->GetPackedType()) {
- case DataType::Type::kUint16:
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Sabal(acc.V8H(), left.V8B(), right.V8B());
@@ -1196,12 +1216,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1260,7 +1281,7 @@
}
void LocationsBuilderARM64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARM64::VisitVecLoad(HVecLoad* instruction) {
@@ -1300,6 +1321,7 @@
}
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
@@ -1317,7 +1339,7 @@
}
void LocationsBuilderARM64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARM64::VisitVecStore(HVecStore* instruction) {
@@ -1329,6 +1351,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
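A note on why so many switches in this file simply add kUint8 as a shared label next to kInt8: two's-complement add, sub and mul, like the bitwise operations, are sign-agnostic at the bit level, so one SIMD opcode serves both signednesses; only the sign-sensitive operations (halving add, min/max, and the like) got their own unsigned cases above. A quick check of the sign-agnostic claim:

    #include <cassert>
    #include <cstdint>
    int main() {
      uint8_t ua = 0xF0, ub = 0x20;  // 240 and 32
      int8_t  sa = -16,  sb = 32;    // identical bit patterns
      assert(uint8_t(ua + ub) == uint8_t(sa + sb));  // both 0x10: Add is shared
      assert(uint8_t(ua * ub) == uint8_t(sa * sb));  // both 0x00: Mul is shared
    }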
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 333d108..f84408d 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -33,9 +33,10 @@
#define __ GetVIXLAssembler()->
void LocationsBuilderARMVIXL::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -54,6 +55,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vdup(Untyped8, dst, InputRegisterAt(instruction, 0));
@@ -82,8 +84,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -91,6 +93,7 @@
instruction->IsVecNot() ? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -105,7 +108,7 @@
}
void LocationsBuilderARMVIXL::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
@@ -113,7 +116,7 @@
}
void LocationsBuilderARMVIXL::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecCnv(HVecCnv* instruction) {
@@ -121,7 +124,7 @@
}
void LocationsBuilderARMVIXL::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNeg(HVecNeg* instruction) {
@@ -129,6 +132,7 @@
vixl32::DRegister src = DRegisterFrom(locations->InAt(0));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vneg(DataTypeValue::S8, dst, src);
@@ -149,7 +153,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAbs(HVecAbs* instruction) {
@@ -161,7 +165,6 @@
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vabs(DataTypeValue::S8, dst, src);
break;
- case DataType::Type::kUint16:
case DataType::Type::kInt16:
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vabs(DataTypeValue::S16, dst, src);
@@ -177,7 +180,7 @@
}
void LocationsBuilderARMVIXL::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecNot(HVecNot* instruction) {
@@ -190,6 +193,7 @@
__ Vmov(I8, dst, 1);
__ Veor(dst, dst, src);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -203,10 +207,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -222,7 +227,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAdd(HVecAdd* instruction) {
@@ -231,6 +236,7 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vadd(I8, dst, lhs, rhs);
@@ -251,7 +257,7 @@
}
void LocationsBuilderARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -260,30 +266,29 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Vrhadd(DataTypeValue::U8, dst, lhs, rhs)
+ : __ Vhadd(DataTypeValue::U8, dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Vrhadd(DataTypeValue::U8, dst, lhs, rhs)
- : __ Vhadd(DataTypeValue::U8, dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Vrhadd(DataTypeValue::S8, dst, lhs, rhs)
- : __ Vhadd(DataTypeValue::S8, dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Vrhadd(DataTypeValue::S8, dst, lhs, rhs)
+ : __ Vhadd(DataTypeValue::S8, dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Vrhadd(DataTypeValue::U16, dst, lhs, rhs)
+ : __ Vhadd(DataTypeValue::U16, dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Vrhadd(DataTypeValue::U16, dst, lhs, rhs)
- : __ Vhadd(DataTypeValue::U16, dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Vrhadd(DataTypeValue::S16, dst, lhs, rhs)
- : __ Vhadd(DataTypeValue::S16, dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Vrhadd(DataTypeValue::S16, dst, lhs, rhs)
+ : __ Vhadd(DataTypeValue::S16, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -292,7 +297,7 @@
}
void LocationsBuilderARMVIXL::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSub(HVecSub* instruction) {
@@ -301,6 +306,7 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vsub(I8, dst, lhs, rhs);
@@ -321,7 +327,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMul(HVecMul* instruction) {
@@ -330,6 +336,7 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vmul(I8, dst, lhs, rhs);
@@ -350,7 +357,7 @@
}
void LocationsBuilderARMVIXL::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecDiv(HVecDiv* instruction) {
@@ -358,7 +365,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
@@ -367,22 +374,21 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U8, dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U8, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S8, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S8, dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U16, dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U16, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S16, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S16, dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
@@ -399,7 +405,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
@@ -408,22 +414,21 @@
vixl32::DRegister rhs = DRegisterFrom(locations->InAt(1));
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U8, dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U8, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S8, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S8, dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U16, dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U16, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S16, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S16, dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
@@ -440,7 +445,8 @@
}
void LocationsBuilderARMVIXL::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ // TODO: Allow constants supported by VAND (immediate).
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAnd(HVecAnd* instruction) {
@@ -450,6 +456,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -463,7 +470,7 @@
}
void LocationsBuilderARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecAndNot(HVecAndNot* instruction) {
@@ -471,7 +478,7 @@
}
void LocationsBuilderARMVIXL::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecOr(HVecOr* instruction) {
@@ -481,6 +488,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -494,7 +502,7 @@
}
void LocationsBuilderARMVIXL::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecXor(HVecXor* instruction) {
@@ -504,6 +512,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -517,9 +526,10 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -535,7 +545,7 @@
}
void LocationsBuilderARMVIXL::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShl(HVecShl* instruction) {
@@ -544,6 +554,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vshl(I8, dst, lhs, value);
@@ -564,7 +575,7 @@
}
void LocationsBuilderARMVIXL::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecShr(HVecShr* instruction) {
@@ -573,6 +584,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vshr(DataTypeValue::S8, dst, lhs, value);
@@ -593,7 +605,7 @@
}
void LocationsBuilderARMVIXL::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecUShr(HVecUShr* instruction) {
@@ -602,6 +614,7 @@
vixl32::DRegister dst = DRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Vshr(DataTypeValue::U8, dst, lhs, value);
@@ -630,9 +643,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -650,7 +664,7 @@
}
void LocationsBuilderARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -658,7 +672,7 @@
}
void LocationsBuilderARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -672,12 +686,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -752,7 +767,7 @@
}
void LocationsBuilderARMVIXL::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
}
void InstructionCodeGeneratorARMVIXL::VisitVecLoad(HVecLoad* instruction) {
@@ -764,6 +779,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
if (IsWordAligned(instruction)) {
@@ -802,7 +818,7 @@
}
void LocationsBuilderARMVIXL::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorARMVIXL::VisitVecStore(HVecStore* instruction) {
@@ -811,6 +827,7 @@
vixl32::Register scratch;
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(8u, instruction->GetVectorLength());
if (IsWordAligned(instruction)) {
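The AArch32 hunks above mirror the AArch64 ones but assert half the lane counts (8u bytes, 4u halfwords, 2u words), since this backend works on 64-bit D registers rather than 128-bit Q registers. The arithmetic, for reference (a sketch; ART derives the length from the HVecOperation itself):

    #include <cstddef>
    constexpr std::size_t kDRegisterBits = 64;
    constexpr std::size_t Lanes(std::size_t element_bits) {
      return kDRegisterBits / element_bits;
    }
    static_assert(Lanes(8) == 8,  "Vadd(I8, ...) operates on 8 byte lanes");
    static_assert(Lanes(16) == 4, "4 halfword lanes");
    static_assert(Lanes(32) == 2, "2 word lanes");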
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index c25f5ac..c5a39ff 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -24,9 +24,10 @@
#define __ down_cast<MipsAssembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -51,6 +52,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ FillB(dst, locations->InAt(0).AsRegister<Register>());
@@ -97,8 +99,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -106,6 +108,7 @@
instruction->IsVecNot() ? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -126,7 +129,7 @@
}
void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
@@ -134,7 +137,7 @@
}
void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
@@ -152,7 +155,7 @@
}
void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
@@ -160,6 +163,7 @@
VectorRegister src = VectorRegisterFrom(locations->InAt(0));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ FillB(dst, ZERO);
@@ -198,7 +202,7 @@
}
void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
@@ -211,7 +215,6 @@
__ FillB(dst, ZERO); // all zeroes
__ Add_aB(dst, dst, src); // dst = abs(0) + abs(src)
break;
- case DataType::Type::kUint16:
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ FillH(dst, ZERO); // all zeroes
@@ -246,7 +249,7 @@
}
void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) {
@@ -259,6 +262,7 @@
__ LdiB(dst, 1);
__ XorV(dst, dst, src);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -277,10 +281,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -299,7 +304,7 @@
}
void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
@@ -308,6 +313,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ AddvB(dst, lhs, rhs);
@@ -340,7 +346,7 @@
}
void LocationsBuilderMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -349,30 +355,29 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Aver_uB(dst, lhs, rhs)
+ : __ Ave_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Aver_uB(dst, lhs, rhs)
- : __ Ave_uB(dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Aver_sB(dst, lhs, rhs)
- : __ Ave_sB(dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Aver_sB(dst, lhs, rhs)
+ : __ Ave_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Aver_uH(dst, lhs, rhs)
+ : __ Ave_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Aver_uH(dst, lhs, rhs)
- : __ Ave_uH(dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Aver_sH(dst, lhs, rhs)
- : __ Ave_sH(dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Aver_sH(dst, lhs, rhs)
+ : __ Ave_sH(dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -381,7 +386,7 @@
}
void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
@@ -390,6 +395,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SubvB(dst, lhs, rhs);
@@ -422,7 +428,7 @@
}
void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
@@ -431,6 +437,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ MulvB(dst, lhs, rhs);
@@ -463,7 +470,7 @@
}
void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
@@ -487,7 +494,7 @@
}
void LocationsBuilderMIPS::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
@@ -496,22 +503,21 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Min_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uB(dst, lhs, rhs);
- } else {
- __ Min_sB(dst, lhs, rhs);
- }
+ __ Min_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Min_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uH(dst, lhs, rhs);
- } else {
- __ Min_sH(dst, lhs, rhs);
- }
+ __ Min_sH(dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -548,7 +554,7 @@
}
void LocationsBuilderMIPS::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
@@ -557,22 +563,21 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Max_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uB(dst, lhs, rhs);
- } else {
- __ Max_sB(dst, lhs, rhs);
- }
+ __ Max_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Max_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uH(dst, lhs, rhs);
- } else {
- __ Max_sH(dst, lhs, rhs);
- }
+ __ Max_sH(dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -609,7 +614,7 @@
}
void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
@@ -619,6 +624,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -637,7 +643,7 @@
}
void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
@@ -645,7 +651,7 @@
}
void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
@@ -655,6 +661,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -673,7 +680,7 @@
}
void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) {
@@ -683,6 +690,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -701,9 +709,10 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -720,7 +729,7 @@
}
void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
@@ -729,6 +738,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SlliB(dst, lhs, value);
@@ -753,7 +763,7 @@
}
void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
@@ -762,6 +772,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SraiB(dst, lhs, value);
@@ -786,7 +797,7 @@
}
void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
@@ -795,6 +806,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SrliB(dst, lhs, value);
@@ -827,9 +839,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -847,7 +860,7 @@
}
void LocationsBuilderMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -856,6 +869,7 @@
VectorRegister left = VectorRegisterFrom(locations->InAt(1));
VectorRegister right = VectorRegisterFrom(locations->InAt(2));
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
if (instruction->GetOpKind() == HInstruction::kAdd) {
@@ -896,7 +910,7 @@
}
void LocationsBuilderMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -905,12 +919,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -965,7 +980,7 @@
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -976,6 +991,7 @@
int32_t offset = VecAddress(locations, size, &base);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ LdB(reg, base, offset);
@@ -1007,7 +1023,7 @@
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
@@ -1018,6 +1034,7 @@
int32_t offset = VecAddress(locations, size, &base);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ StB(reg, base, offset);
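[Review note] The MIPS vector hunks above combine two mechanical changes: the ArenaAllocator rename (GetArena() -> GetAllocator(), parameter arena -> allocator) and a new DataType::Type::kUint8 packed type added beside kInt8. Where a case previously branched on IsUnsigned() at code-generation time (HalvingAdd, Min, Max), the signedness now lives in the packed type itself, so each switch arm maps to exactly one machine instruction. Below is a minimal stand-alone sketch of that case split; the enum and Emit helpers are simplified stand-ins for illustration, not ART's real types.

```cpp
#include <cstdio>

enum class PackedType { kUint8, kInt8, kUint16, kInt16 };

void EmitAveUB() { std::puts("Ave_uB (unsigned byte average)"); }
void EmitAveSB() { std::puts("Ave_sB (signed byte average)"); }

// Before: a single kInt8 case consulted an IsUnsigned() flag at codegen time.
void HalvingAddOld(PackedType t, bool is_unsigned) {
  switch (t) {
    case PackedType::kInt8:
      if (is_unsigned) { EmitAveUB(); } else { EmitAveSB(); }
      break;
    default:
      break;
  }
}

// After: signedness is carried by the packed type, so each arm selects
// exactly one instruction and the runtime flag disappears.
void HalvingAddNew(PackedType t) {
  switch (t) {
    case PackedType::kUint8: EmitAveUB(); break;
    case PackedType::kInt8:  EmitAveSB(); break;
    default:                 break;
  }
}

int main() {
  HalvingAddOld(PackedType::kInt8, /*is_unsigned=*/true);
  HalvingAddNew(PackedType::kUint8);  // same instruction, now type-driven
}
```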
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index f60f708..e606df2 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -29,9 +29,10 @@
}
void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -56,6 +57,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ FillB(dst, locations->InAt(0).AsRegister<GpuRegister>());
@@ -100,8 +102,8 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -109,6 +111,7 @@
instruction->IsVecNot() ? Location::kOutputOverlap
: Location::kNoOutputOverlap);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -129,7 +132,7 @@
}
void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
@@ -137,7 +140,7 @@
}
void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
@@ -156,7 +159,7 @@
}
void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
@@ -164,6 +167,7 @@
VectorRegister src = VectorRegisterFrom(locations->InAt(0));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ FillB(dst, ZERO);
@@ -202,7 +206,7 @@
}
void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
@@ -215,7 +219,6 @@
__ FillB(dst, ZERO); // all zeroes
__ Add_aB(dst, dst, src); // dst = abs(0) + abs(src)
break;
- case DataType::Type::kUint16:
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ FillH(dst, ZERO); // all zeroes
@@ -250,7 +253,7 @@
}
void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
@@ -263,6 +266,7 @@
__ LdiB(dst, 1);
__ XorV(dst, dst, src);
break;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -281,10 +285,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -303,7 +308,7 @@
}
void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
@@ -312,6 +317,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ AddvB(dst, lhs, rhs);
@@ -344,7 +350,7 @@
}
void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -353,30 +359,29 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Aver_uB(dst, lhs, rhs)
+ : __ Ave_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Aver_uB(dst, lhs, rhs)
- : __ Ave_uB(dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Aver_sB(dst, lhs, rhs)
- : __ Ave_sB(dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Aver_sB(dst, lhs, rhs)
+ : __ Ave_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ instruction->IsRounded()
+ ? __ Aver_uH(dst, lhs, rhs)
+ : __ Ave_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- instruction->IsRounded()
- ? __ Aver_uH(dst, lhs, rhs)
- : __ Ave_uH(dst, lhs, rhs);
- } else {
- instruction->IsRounded()
- ? __ Aver_sH(dst, lhs, rhs)
- : __ Ave_sH(dst, lhs, rhs);
- }
+ instruction->IsRounded()
+ ? __ Aver_sH(dst, lhs, rhs)
+ : __ Ave_sH(dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -385,7 +390,7 @@
}
void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
@@ -394,6 +399,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SubvB(dst, lhs, rhs);
@@ -426,7 +432,7 @@
}
void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
@@ -435,6 +441,7 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ MulvB(dst, lhs, rhs);
@@ -467,7 +474,7 @@
}
void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
@@ -491,7 +498,7 @@
}
void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
@@ -500,22 +507,21 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Min_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uB(dst, lhs, rhs);
- } else {
- __ Min_sB(dst, lhs, rhs);
- }
+ __ Min_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Min_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uH(dst, lhs, rhs);
- } else {
- __ Min_sH(dst, lhs, rhs);
- }
+ __ Min_sH(dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -552,7 +558,7 @@
}
void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
@@ -561,22 +567,21 @@
VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ Max_uB(dst, lhs, rhs);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uB(dst, lhs, rhs);
- } else {
- __ Max_sB(dst, lhs, rhs);
- }
+ __ Max_sB(dst, lhs, rhs);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ Max_uH(dst, lhs, rhs);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uH(dst, lhs, rhs);
- } else {
- __ Max_sH(dst, lhs, rhs);
- }
+ __ Max_sH(dst, lhs, rhs);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -613,7 +618,7 @@
}
void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
@@ -623,6 +628,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -641,7 +647,7 @@
}
void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -649,7 +655,7 @@
}
void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
@@ -659,6 +665,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -677,7 +684,7 @@
}
void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
@@ -687,6 +694,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -705,9 +713,10 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -724,7 +733,7 @@
}
void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
@@ -733,6 +742,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SlliB(dst, lhs, value);
@@ -757,7 +767,7 @@
}
void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
@@ -766,6 +776,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SraiB(dst, lhs, value);
@@ -790,7 +801,7 @@
}
void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
@@ -799,6 +810,7 @@
VectorRegister dst = VectorRegisterFrom(locations->Out());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ SrliB(dst, lhs, value);
@@ -831,9 +843,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -851,7 +864,7 @@
}
void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -860,6 +873,7 @@
VectorRegister left = VectorRegisterFrom(locations->InAt(1));
VectorRegister right = VectorRegisterFrom(locations->InAt(2));
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
if (instruction->GetOpKind() == HInstruction::kAdd) {
@@ -900,7 +914,7 @@
}
void LocationsBuilderMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -909,12 +923,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -969,7 +984,7 @@
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -980,6 +995,7 @@
int32_t offset = VecAddress(locations, size, &base);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ LdB(reg, base, offset);
@@ -1011,7 +1027,7 @@
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
@@ -1022,6 +1038,7 @@
int32_t offset = VecAddress(locations, size, &base);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ StB(reg, base, offset);
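[Review note] All of the `new (GetGraph()->GetAllocator()) LocationSummary(...)` sites above rely on placement new into arena-owned storage: allocation is a pointer bump, and objects are reclaimed wholesale when the arena dies rather than deleted individually. A toy model of that idiom follows, with BumpArena standing in for ART's ArenaAllocator; only the allocation shape mirrors the diff.

```cpp
#include <cstddef>
#include <new>

class BumpArena {
 public:
  void* Alloc(std::size_t n) {
    // Round the bump pointer up to max alignment, then reserve n bytes.
    std::size_t off = (used_ + alignof(std::max_align_t) - 1) &
                      ~(alignof(std::max_align_t) - 1);
    used_ = off + n;
    return buf_ + off;
  }

 private:
  alignas(std::max_align_t) char buf_[1024];
  std::size_t used_ = 0;
};

// Placement operator new routing through the arena, mirroring the shape of
// `new (allocator) LocationSummary(instruction)` in the hunks above.
void* operator new(std::size_t n, BumpArena* arena) { return arena->Alloc(n); }

struct LocationSummary {
  explicit LocationSummary(int id) : instruction_id(id) {}
  int instruction_id;
};

int main() {
  BumpArena arena;
  LocationSummary* locations = new (&arena) LocationSummary(42);
  return locations->instruction_id == 42 ? 0 : 1;  // no delete: arena owns it
}
```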
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 6515dbe..ad8128a 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -26,7 +26,7 @@
#define __ down_cast<X86Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
@@ -37,6 +37,7 @@
}
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -70,6 +71,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ movd(dst, locations->InAt(0).AsRegister<Register>());
@@ -115,13 +117,14 @@
}
void LocationsBuilderX86::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kInt64:
// Long needs extra temporary to store into the register pair.
locations->AddTemp(Location::RequiresFpuRegister());
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -145,6 +148,7 @@
XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16: // TODO: up to here, and?
@@ -176,10 +180,11 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -197,7 +202,7 @@
}
void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -264,7 +269,7 @@
}
void LocationsBuilderX86::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecCnv(HVecCnv* instruction) {
@@ -282,7 +287,7 @@
}
void LocationsBuilderX86::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecNeg(HVecNeg* instruction) {
@@ -290,6 +295,7 @@
XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ pxor(dst, dst);
@@ -328,7 +334,7 @@
}
void LocationsBuilderX86::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -369,7 +375,7 @@
}
void LocationsBuilderX86::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -390,6 +396,7 @@
__ pxor(dst, src);
break;
}
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -417,10 +424,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -439,7 +447,7 @@
}
void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
@@ -448,6 +456,7 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ paddb(dst, src);
@@ -480,7 +489,7 @@
}
void LocationsBuilderX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -490,15 +499,13 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
DCHECK(instruction->IsRounded());
- DCHECK(instruction->IsUnsigned());
switch (instruction->GetPackedType()) {
- case DataType::Type::kInt8:
+ case DataType::Type::kUint8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ pavgb(dst, src);
return;
case DataType::Type::kUint16:
- case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pavgw(dst, src);
return;
@@ -509,7 +516,7 @@
}
void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
@@ -518,6 +525,7 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ psubb(dst, src);
@@ -550,7 +558,7 @@
}
void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
@@ -583,7 +591,7 @@
}
void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
@@ -607,7 +615,7 @@
}
void LocationsBuilderX86::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
@@ -616,22 +624,21 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ pminub(dst, src);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminub(dst, src);
- } else {
- __ pminsb(dst, src);
- }
+ __ pminsb(dst, src);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pminuw(dst, src);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminuw(dst, src);
- } else {
- __ pminsw(dst, src);
- }
+ __ pminsw(dst, src);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -659,7 +666,7 @@
}
void LocationsBuilderX86::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
@@ -668,22 +675,21 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ pmaxub(dst, src);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxub(dst, src);
- } else {
- __ pmaxsb(dst, src);
- }
+ __ pmaxsb(dst, src);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pmaxuw(dst, src);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxuw(dst, src);
- } else {
- __ pmaxsw(dst, src);
- }
+ __ pmaxsw(dst, src);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -711,7 +717,7 @@
}
void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
@@ -721,6 +727,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -745,7 +752,7 @@
}
void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
@@ -755,6 +762,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -779,7 +787,7 @@
}
void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
@@ -789,6 +797,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -813,7 +822,7 @@
}
void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) {
@@ -823,6 +832,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -847,8 +857,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -865,7 +875,7 @@
}
void LocationsBuilderX86::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShl(HVecShl* instruction) {
@@ -894,7 +904,7 @@
}
void LocationsBuilderX86::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecShr(HVecShr* instruction) {
@@ -919,7 +929,7 @@
}
void LocationsBuilderX86::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecUShr(HVecUShr* instruction) {
@@ -948,7 +958,7 @@
}
void LocationsBuilderX86::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -963,6 +973,7 @@
}
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1000,6 +1011,7 @@
// Set required elements.
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16: // TODO: up to here, and?
@@ -1033,9 +1045,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1053,7 +1066,7 @@
}
void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1062,7 +1075,7 @@
}
void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1071,12 +1084,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1117,7 +1131,7 @@
}
void LocationsBuilderX86::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1156,6 +1170,7 @@
}
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
@@ -1179,7 +1194,7 @@
}
void LocationsBuilderX86::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86::VisitVecStore(HVecStore* instruction) {
@@ -1190,6 +1205,7 @@
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
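[Review note] On x86 the HalvingAdd hunks drop the signed kInt8/kInt16 arms outright, and the removed DCHECK(instruction->IsUnsigned()) becomes redundant: pavgb/pavgw only compute the unsigned rounded average (a + b + 1) >> 1, so after the type split only kUint8/kUint16 can reach this visitor. A scalar model of that arithmetic, assuming no intrinsics:

```cpp
#include <cassert>
#include <cstdint>

uint8_t RoundedHalvingAddU8(uint8_t a, uint8_t b) {
  // Widen to 16 bits first so the +1 rounding carry cannot overflow.
  return static_cast<uint8_t>((uint16_t{a} + uint16_t{b} + 1) >> 1);
}

int main() {
  assert(RoundedHalvingAddU8(0, 1) == 1);        // rounds up on odd sums
  assert(RoundedHalvingAddU8(254, 255) == 255);  // no wraparound at the top
  assert(RoundedHalvingAddU8(10, 20) == 15);
}
```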
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4241042..107030e 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -26,11 +26,12 @@
#define __ down_cast<X86_64Assembler*>(GetAssembler())-> // NOLINT
void LocationsBuilderX86_64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
HInstruction* input = instruction->InputAt(0);
bool is_zero = IsZeroBitPattern(input);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -65,6 +66,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ movd(dst, locations->InAt(0).AsRegister<CpuRegister>(), /*64-bit*/ false);
@@ -106,9 +108,10 @@
}
void LocationsBuilderX86_64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -133,6 +136,7 @@
XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16: // TODO: up to here, and?
@@ -159,10 +163,11 @@
}
// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* arena, HVecUnaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -180,7 +185,7 @@
}
void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
instruction->GetKind() == HVecReduce::kMin ||
@@ -247,7 +252,7 @@
}
void LocationsBuilderX86_64::VisitVecCnv(HVecCnv* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecCnv(HVecCnv* instruction) {
@@ -265,7 +270,7 @@
}
void LocationsBuilderX86_64::VisitVecNeg(HVecNeg* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecNeg(HVecNeg* instruction) {
@@ -273,6 +278,7 @@
XmmRegister src = locations->InAt(0).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ pxor(dst, dst);
@@ -311,7 +317,7 @@
}
void LocationsBuilderX86_64::VisitVecAbs(HVecAbs* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Integral-abs requires a temporary for the comparison.
if (instruction->GetPackedType() == DataType::Type::kInt32) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -352,7 +358,7 @@
}
void LocationsBuilderX86_64::VisitVecNot(HVecNot* instruction) {
- CreateVecUnOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Boolean-not requires a temporary to construct the 16 x one.
if (instruction->GetPackedType() == DataType::Type::kBool) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -373,6 +379,7 @@
__ pxor(dst, src);
break;
}
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -400,10 +407,11 @@
}
// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -422,7 +430,7 @@
}
void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
@@ -431,6 +439,7 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ paddb(dst, src);
@@ -463,7 +472,7 @@
}
void LocationsBuilderX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
@@ -473,15 +482,13 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
DCHECK(instruction->IsRounded());
- DCHECK(instruction->IsUnsigned());
switch (instruction->GetPackedType()) {
- case DataType::Type::kInt8:
+ case DataType::Type::kUint8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ pavgb(dst, src);
return;
case DataType::Type::kUint16:
- case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pavgw(dst, src);
return;
@@ -492,7 +499,7 @@
}
void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
@@ -501,6 +508,7 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
__ psubb(dst, src);
@@ -533,7 +541,7 @@
}
void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
@@ -566,7 +574,7 @@
}
void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
@@ -590,7 +598,7 @@
}
void LocationsBuilderX86_64::VisitVecMin(HVecMin* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
@@ -599,22 +607,21 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ pminub(dst, src);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminub(dst, src);
- } else {
- __ pminsb(dst, src);
- }
+ __ pminsb(dst, src);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pminuw(dst, src);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminuw(dst, src);
- } else {
- __ pminsw(dst, src);
- }
+ __ pminsw(dst, src);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -642,7 +649,7 @@
}
void LocationsBuilderX86_64::VisitVecMax(HVecMax* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
@@ -651,22 +658,21 @@
XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
+ DCHECK_EQ(16u, instruction->GetVectorLength());
+ __ pmaxub(dst, src);
+ break;
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxub(dst, src);
- } else {
- __ pmaxsb(dst, src);
- }
+ __ pmaxsb(dst, src);
break;
case DataType::Type::kUint16:
+ DCHECK_EQ(8u, instruction->GetVectorLength());
+ __ pmaxuw(dst, src);
+ break;
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxuw(dst, src);
- } else {
- __ pmaxsw(dst, src);
- }
+ __ pmaxsw(dst, src);
break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
@@ -694,7 +700,7 @@
}
void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
@@ -704,6 +710,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -728,7 +735,7 @@
}
void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
@@ -738,6 +745,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -762,7 +770,7 @@
}
void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
@@ -772,6 +780,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -796,7 +805,7 @@
}
void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) {
- CreateVecBinOpLocations(GetGraph()->GetArena(), instruction);
+ CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) {
@@ -806,6 +815,7 @@
XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -830,8 +840,8 @@
}
// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* arena, HVecBinaryOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -848,7 +858,7 @@
}
void LocationsBuilderX86_64::VisitVecShl(HVecShl* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShl(HVecShl* instruction) {
@@ -877,7 +887,7 @@
}
void LocationsBuilderX86_64::VisitVecShr(HVecShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecShr(HVecShr* instruction) {
@@ -902,7 +912,7 @@
}
void LocationsBuilderX86_64::VisitVecUShr(HVecUShr* instruction) {
- CreateVecShiftLocations(GetGraph()->GetArena(), instruction);
+ CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecUShr(HVecUShr* instruction) {
@@ -931,7 +941,7 @@
}
void LocationsBuilderX86_64::VisitVecSetScalars(HVecSetScalars* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
DCHECK_EQ(1u, instruction->InputCount()); // only one input currently implemented
@@ -940,6 +950,7 @@
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -978,6 +989,7 @@
// Set required elements.
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16: // TODO: up to here, and?
@@ -1006,9 +1018,10 @@
}
// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* arena, HVecOperation* instruction) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1026,7 +1039,7 @@
}
void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
@@ -1035,7 +1048,7 @@
}
void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetArena(), instruction);
+ CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
}
void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
@@ -1044,12 +1057,13 @@
}
// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* arena,
+static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
bool is_load) {
- LocationSummary* locations = new (arena) LocationSummary(instruction);
+ LocationSummary* locations = new (allocator) LocationSummary(instruction);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1090,7 +1104,7 @@
}
void LocationsBuilderX86_64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ true);
// String load requires a temporary for the compressed load.
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
@@ -1129,6 +1143,7 @@
}
FALLTHROUGH_INTENDED;
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
@@ -1152,7 +1167,7 @@
}
void LocationsBuilderX86_64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetArena(), instruction, /*is_load*/ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /*is_load*/ false);
}
void InstructionCodeGeneratorX86_64::VisitVecStore(HVecStore* instruction) {
@@ -1163,6 +1178,7 @@
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
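The hunks above are a mechanical rename: every placement-new and arena-backed container in the vector code generator now reaches the arena through GetAllocator() rather than GetArena(). As a standalone sketch of the idiom itself (simplified stand-ins, not ART's real ArenaAllocator or LocationSummary):

    #include <cstddef>

    // Minimal bump allocator standing in for art::ArenaAllocator; no bounds
    // checking or block chaining in this sketch.
    class ArenaAllocator {
     public:
      void* Alloc(std::size_t size) {
        std::size_t offset = used_;
        used_ += (size + 7u) & ~std::size_t{7u};  // keep 8-byte alignment
        return buffer_ + offset;
      }
     private:
      alignas(16) char buffer_[4096];
      std::size_t used_ = 0;
    };

    // The overload that makes `new (allocator) T(...)` work at the call sites.
    void* operator new(std::size_t size, ArenaAllocator* allocator) {
      return allocator->Alloc(size);
    }

    struct LocationSummary {};

    int main() {
      ArenaAllocator allocator;
      // Same shape as the rewritten call sites; the object is arena-owned,
      // so there is no matching delete.
      LocationSummary* locations = new (&allocator) LocationSummary();
      (void)locations;
      return 0;
    }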
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 70e270e..d8a47fa 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -414,7 +414,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -811,7 +811,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1030,21 +1030,21 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
method_address_offset_(std::less<uint32_t>(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
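The constructor hunk applies the same rename to the Adapter(kArenaAllocCodeGenerator) calls that let standard containers draw their storage from the graph's arena. One way to model such an STL-style adapter (a rough stand-in for ART's adapter type, ignoring the allocation-kind tag):

    #include <cstddef>
    #include <deque>
    #include <vector>

    class Arena {
     public:
      void* Alloc(std::size_t size) {
        blocks_.emplace_back(size);  // one block per request; freed wholesale
        return blocks_.back().data();
      }
     private:
      std::deque<std::vector<char>> blocks_;
    };

    template <typename T>
    class ArenaAdapter {
     public:
      using value_type = T;
      explicit ArenaAdapter(Arena* arena) : arena_(arena) {}
      template <typename U>
      ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena()) {}
      T* allocate(std::size_t n) {
        return static_cast<T*>(arena_->Alloc(n * sizeof(T)));
      }
      void deallocate(T*, std::size_t) {}  // memory dies with the arena
      Arena* arena() const { return arena_; }
     private:
      Arena* arena_;
    };

    template <typename T, typename U>
    bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return a.arena() == b.arena();
    }
    template <typename T, typename U>
    bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return !(a == b);
    }

    int main() {
      Arena arena;
      // Mirrors e.g. boot_image_method_patches_(graph->GetAllocator()->Adapter(...)).
      std::vector<int, ArenaAdapter<int>> patches{ArenaAdapter<int>(&arena)};
      patches.push_back(42);
      return 0;
    }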
@@ -1131,12 +1131,13 @@
Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type type) const {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
return Location::RegisterLocation(EAX);
case DataType::Type::kInt64:
@@ -1159,12 +1160,13 @@
Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type type) {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kReference: {
+ case DataType::Type::kInt32: {
uint32_t index = gp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
@@ -1331,7 +1333,7 @@
}
void CodeGeneratorX86::MoveLocation(Location dst, Location src, DataType::Type dst_type) {
- HParallelMove move(GetGraph()->GetArena());
+ HParallelMove move(GetGraph()->GetAllocator());
if (dst_type == DataType::Type::kInt64 && !src.IsConstant() && !src.IsFpuRegister()) {
move.AddMove(src.ToLow(), dst.ToLow(), DataType::Type::kInt32, nullptr);
move.AddMove(src.ToHigh(), dst.ToHigh(), DataType::Type::kInt32, nullptr);
@@ -1679,7 +1681,7 @@
}
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1696,7 +1698,7 @@
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1716,7 +1718,7 @@
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1748,7 +1750,7 @@
}
void LocationsBuilderX86::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1842,7 +1844,7 @@
}
void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1855,7 +1857,7 @@
void LocationsBuilderX86::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64: {
@@ -2022,7 +2024,7 @@
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2032,7 +2034,7 @@
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2042,7 +2044,7 @@
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2052,7 +2054,7 @@
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2062,7 +2064,7 @@
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2097,14 +2099,15 @@
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
locations->SetInAt(0, Location::RegisterLocation(EAX));
break;
@@ -2127,12 +2130,13 @@
void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
if (kIsDebugBuild) {
switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
break;
@@ -2296,7 +2300,7 @@
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2377,7 +2381,7 @@
void LocationsBuilderX86::VisitX86FPNeg(HX86FPNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
DCHECK(DataType::IsFloatingPointType(neg->GetType()));
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2408,7 +2412,8 @@
void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
// The float-to-long and double-to-long type conversions rely on a
// call to the runtime.
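The strengthened DCHECK relies on a value-range notion of "implicit": a conversion needs no generated code when every input value is already representable unchanged in the result type. A standalone model of that predicate for the integral types involved (simplified enum and helpers; ART's actual DataType::IsTypeConversionImplicit also has to consider floating-point and reference types):

    #include <cstdint>

    enum class Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

    constexpr int64_t MinOf(Type t) {
      return (t == Type::kInt8) ? INT8_MIN
           : (t == Type::kInt16) ? INT16_MIN
           : (t == Type::kInt32) ? INT32_MIN
           : (t == Type::kInt64) ? INT64_MIN
           : 0;  // kBool, kUint8, kUint16
    }

    constexpr int64_t MaxOf(Type t) {
      return (t == Type::kBool) ? 1
           : (t == Type::kUint8) ? UINT8_MAX
           : (t == Type::kInt8) ? INT8_MAX
           : (t == Type::kUint16) ? UINT16_MAX
           : (t == Type::kInt16) ? INT16_MAX
           : (t == Type::kInt32) ? INT32_MAX
           : INT64_MAX;
    }

    constexpr bool IsTypeConversionImplicit(Type input, Type result) {
      return MinOf(result) <= MinOf(input) && MaxOf(input) <= MaxOf(result);
    }

    static_assert(IsTypeConversionImplicit(Type::kUint8, Type::kInt32), "widening");
    static_assert(!IsTypeConversionImplicit(Type::kInt8, Type::kUint8), "sign flip");
    static_assert(!IsTypeConversionImplicit(Type::kInt32, Type::kInt16), "narrowing");

    int main() { return 0; }  // assertions are compile-time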
@@ -2418,16 +2423,23 @@
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
-
- // The Java language does not allow treating boolean as an integral type but
- // our bit representation makes it safe.
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
switch (result_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
switch (input_type) {
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ locations->SetInAt(0, Location::ByteRegisterOrConstant(ECX, conversion->InputAt(0)));
+ // Make the output overlap to please the register allocator. This greatly simplifies
+ // the validation of the linear scan implementation
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
case DataType::Type::kInt64: {
- // Type conversion from long to byte is a result of code transformations.
HInstruction* input = conversion->InputAt(0);
Location input_location = input->IsConstant()
? Location::ConstantLocation(input->AsConstant())
@@ -2438,17 +2450,6 @@
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
break;
}
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
- locations->SetInAt(0, Location::ByteRegisterOrConstant(ECX, conversion->InputAt(0)));
- // Make the output overlap to please the register allocator. This greatly simplifies
- // the validation of the linear scan implementation
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
@@ -2456,43 +2457,27 @@
}
break;
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
+ DCHECK(DataType::IsIntegralType(input_type)) << input_type;
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -2507,19 +2492,17 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64: {
- // Processing a Dex `float-to-long' or 'double-to-long' instruction.
InvokeRuntimeCallingConvention calling_convention;
XmmRegister parameter = calling_convention.GetFpuRegisterAt(0);
locations->SetInAt(0, Location::FpuRegisterLocation(parameter));
@@ -2535,47 +2518,24 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to char is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-float' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-float' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::Any());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -2589,24 +2549,21 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-double' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-double' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::Any());
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -2629,12 +2586,54 @@
Location in = locations->InAt(0);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
switch (result_type) {
+ case DataType::Type::kUint8:
+ switch (input_type) {
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ if (in.IsRegister()) {
+ __ movzxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint8_t>(value)));
+ }
+ break;
+ case DataType::Type::kInt64:
+ if (in.IsRegisterPair()) {
+ __ movzxb(out.AsRegister<Register>(), in.AsRegisterPairLow<ByteRegister>());
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint8_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case DataType::Type::kInt8:
switch (input_type) {
+ case DataType::Type::kUint8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ if (in.IsRegister()) {
+ __ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
case DataType::Type::kInt64:
- // Type conversion from long to byte is a result of code transformations.
if (in.IsRegisterPair()) {
__ movsxb(out.AsRegister<Register>(), in.AsRegisterPairLow<ByteRegister>());
} else {
@@ -2643,18 +2642,37 @@
__ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
}
break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case DataType::Type::kUint16:
+ switch (input_type) {
+ case DataType::Type::kInt8:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
if (in.IsRegister()) {
- __ movsxb(out.AsRegister<Register>(), in.AsRegister<ByteRegister>());
+ __ movzxw(out.AsRegister<Register>(), in.AsRegister<Register>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
} else {
DCHECK(in.GetConstant()->IsIntConstant());
int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
- __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
+ }
+ break;
+ case DataType::Type::kInt64:
+ if (in.IsRegisterPair()) {
+ __ movzxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
}
break;
@@ -2666,24 +2684,8 @@
case DataType::Type::kInt16:
switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- if (in.IsRegisterPair()) {
- __ movsxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
- } else if (in.IsDoubleStackSlot()) {
- __ movsxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
- } else {
- DCHECK(in.GetConstant()->IsLongConstant());
- int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
- }
- break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
+ case DataType::Type::kInt32:
if (in.IsRegister()) {
__ movsxw(out.AsRegister<Register>(), in.AsRegister<Register>());
} else if (in.IsStackSlot()) {
@@ -2694,6 +2696,17 @@
__ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
}
break;
+ case DataType::Type::kInt64:
+ if (in.IsRegisterPair()) {
+ __ movsxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movsxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
+ }
+ break;
default:
LOG(FATAL) << "Unexpected type conversion from " << input_type
@@ -2704,7 +2717,6 @@
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
if (in.IsRegisterPair()) {
__ movl(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
} else if (in.IsDoubleStackSlot()) {
@@ -2718,7 +2730,6 @@
break;
case DataType::Type::kFloat32: {
- // Processing a Dex `float-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
@@ -2743,7 +2754,6 @@
}
case DataType::Type::kFloat64: {
- // Processing a Dex `double-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
@@ -2776,12 +2786,11 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
DCHECK_EQ(in.AsRegister<Register>(), EAX);
@@ -2789,13 +2798,11 @@
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-long' instruction.
codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickF2l, int64_t, float>();
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-long' instruction.
codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
CheckEntrypointTypes<kQuickD2l, int64_t, double>();
break;
@@ -2806,57 +2813,18 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- if (in.IsRegisterPair()) {
- __ movzxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
- } else if (in.IsDoubleStackSlot()) {
- __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
- } else {
- DCHECK(in.GetConstant()->IsLongConstant());
- int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
- __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
- }
- break;
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.

- if (in.IsRegister()) {
- __ movzxw(out.AsRegister<Register>(), in.AsRegister<Register>());
- } else if (in.IsStackSlot()) {
- __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
- } else {
- DCHECK(in.GetConstant()->IsIntConstant());
- int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
- __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
- }
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-float' instruction.
__ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
break;
case DataType::Type::kInt64: {
- // Processing a Dex `long-to-float' instruction.
size_t adjustment = 0;
// Create stack space for the call to
@@ -2886,7 +2854,6 @@
}
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
__ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
break;
@@ -2899,17 +2866,15 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-double' instruction.
__ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<Register>());
break;
case DataType::Type::kInt64: {
- // Processing a Dex `long-to-double' instruction.
size_t adjustment = 0;
// Create stack space for the call to
@@ -2939,7 +2904,6 @@
}
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
__ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
break;
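After the regrouping, the emitted instruction is determined by the result type alone: unsigned results zero-extend (movzx), signed results sign-extend (movsx), and constant inputs fold through the matching static_cast, as in the Immediate(...) lines above. A standalone check of those C++ equivalences (illustration only, not ART code):

    #include <cassert>
    #include <cstdint>

    int32_t IntToUint8(int32_t v)  { return static_cast<uint8_t>(v);  }  // movzxb
    int32_t IntToInt8(int32_t v)   { return static_cast<int8_t>(v);   }  // movsxb
    int32_t IntToUint16(int32_t v) { return static_cast<uint16_t>(v); }  // movzxw
    int32_t IntToInt16(int32_t v)  { return static_cast<int16_t>(v);  }  // movsxw

    int main() {
      assert(IntToUint8(0x1FF) == 0xFF);      // zero-extended: high bits dropped
      assert(IntToInt8(0xFF) == -1);          // sign-extended from bit 7
      assert(IntToUint16(-1) == 0xFFFF);      // char conversion is unsigned
      assert(IntToInt16(0x8000) == -0x8000);  // short conversion is signed
      return 0;
    }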
@@ -2957,7 +2921,7 @@
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3084,7 +3048,7 @@
void LocationsBuilderX86::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3190,7 +3154,7 @@
void LocationsBuilderX86::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32:
locations->SetInAt(0, Location::RequiresRegister());
@@ -3617,7 +3581,7 @@
GenerateDivRemWithAnyConstant(instruction);
}
} else {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86(
instruction, out.AsRegister<Register>(), is_div);
codegen_->AddSlowPath(slow_path);
@@ -3666,7 +3630,7 @@
LocationSummary::CallKind call_kind = (div->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case DataType::Type::kInt32: {
@@ -3771,7 +3735,7 @@
LocationSummary::CallKind call_kind = (rem->GetResultType() == DataType::Type::kInt64)
? LocationSummary::kCallOnMainOnly
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
switch (type) {
case DataType::Type::kInt32: {
@@ -3832,6 +3796,7 @@
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
switch (instruction->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -3852,7 +3817,7 @@
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3860,6 +3825,7 @@
switch (instruction->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -3901,7 +3867,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -4096,7 +4062,7 @@
void LocationsBuilderX86::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt64:
@@ -4204,8 +4170,8 @@
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4233,8 +4199,8 @@
}
void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4253,7 +4219,7 @@
void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4269,7 +4235,7 @@
void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4278,7 +4244,7 @@
void LocationsBuilderX86::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4304,7 +4270,7 @@
void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4331,7 +4297,7 @@
void LocationsBuilderX86::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4346,12 +4312,13 @@
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -4388,9 +4355,10 @@
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
codegen_->GenerateIntCompare(left, right);
break;
@@ -4463,7 +4431,7 @@
void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4746,10 +4714,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- kEmitCompilerReadBarrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ kEmitCompilerReadBarrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4792,7 +4760,8 @@
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
switch (field_type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
__ movzxb(out.AsRegister<Register>(), Address(base, offset));
break;
}
@@ -4802,13 +4771,13 @@
break;
}
- case DataType::Type::kInt16: {
- __ movsxw(out.AsRegister<Register>(), Address(base, offset));
+ case DataType::Type::kUint16: {
+ __ movzxw(out.AsRegister<Register>(), Address(base, offset));
break;
}
- case DataType::Type::kUint16: {
- __ movzxw(out.AsRegister<Register>(), Address(base, offset));
+ case DataType::Type::kInt16: {
+ __ movsxw(out.AsRegister<Register>(), Address(base, offset));
break;
}
@@ -4893,12 +4862,11 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
bool is_volatile = field_info.IsVolatile();
DataType::Type field_type = field_info.GetFieldType();
- bool is_byte_type = (field_type == DataType::Type::kBool)
- || (field_type == DataType::Type::kInt8);
+ bool is_byte_type = DataType::Size(field_type) == 1u;
// The register allocator does not support multiple
// inputs that die at entry with one in a specific register.
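The is_byte_type rewrite holds because byte-width is exactly the property the ByteRegister constraint cares about, and with kUint8 in the type system an explicit enumeration would now need three cases instead of two. A standalone sketch of the size mapping (stand-in enum, not ART's DataType):

    #include <cstddef>

    enum class Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

    constexpr std::size_t Size(Type type) {
      return (type == Type::kBool || type == Type::kUint8 || type == Type::kInt8) ? 1u
           : (type == Type::kUint16 || type == Type::kInt16) ? 2u
           : (type == Type::kInt32) ? 4u
           : 8u;
    }

    // Byte-sized stores on x86 need a ByteRegister (AL/BL/CL/DL).
    static_assert(Size(Type::kUint8) == 1u, "covered by the size test");
    static_assert(Size(Type::kBool) == 1u, "covered by the size test");
    static_assert(Size(Type::kUint16) == 2u, "chars take the 16-bit path");

    int main() { return 0; }  // assertions are compile-time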
@@ -4957,13 +4925,14 @@
switch (field_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
__ movb(Address(base, offset), value.AsRegister<ByteRegister>());
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
if (value.IsConstant()) {
__ movw(Address(base, offset),
Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
@@ -5180,7 +5149,7 @@
}
void CodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -5207,10 +5176,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5242,7 +5211,8 @@
DataType::Type type = instruction->GetType();
switch (type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
Register out = out_loc.AsRegister<Register>();
__ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
@@ -5254,12 +5224,6 @@
break;
}
- case DataType::Type::kInt16: {
- Register out = out_loc.AsRegister<Register>();
- __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
- break;
- }
-
case DataType::Type::kUint16: {
Register out = out_loc.AsRegister<Register>();
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
@@ -5284,6 +5248,12 @@
break;
}
+ case DataType::Type::kInt16: {
+ Register out = out_loc.AsRegister<Register>();
+ __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
+ break;
+ }
+
case DataType::Type::kInt32: {
Register out = out_loc.AsRegister<Register>();
__ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5362,14 +5332,13 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
- bool is_byte_type = (value_type == DataType::Type::kBool)
- || (value_type == DataType::Type::kInt8);
+ bool is_byte_type = DataType::Size(value_type) == 1u;
// We need the inputs to be different than the output in case of long operation.
// In case of a byte operation, the register allocator does not support multiple
// inputs that die at entry with one in a specific register.
@@ -5407,6 +5376,7 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset);
@@ -5419,8 +5389,8 @@
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
@@ -5457,7 +5427,7 @@
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5600,7 +5570,7 @@
}
void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5648,7 +5618,7 @@
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
+ new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5714,8 +5684,8 @@
}
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5742,7 +5712,7 @@
SuspendCheckSlowPathX86* slow_path =
down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -6074,7 +6044,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6195,7 +6165,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
@@ -6214,7 +6184,7 @@
void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -6223,7 +6193,7 @@
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -6259,7 +6229,7 @@
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
load_kind == HLoadString::LoadKind::kBootImageInternTable ||
@@ -6330,7 +6300,7 @@
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
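The testl/j(kEqual, ...) pair above is the general slow-path shape: the hot path is one load plus a null test, and the rare resolution work lives out of line, jumping back past the test when done. A toy standalone model of that control flow (no ART types; the cache slot stands in for the .bss string entry):

    #include <cassert>
    #include <string>

    static std::string* cache = nullptr;  // the .bss slot, initially empty

    // Out-of-line "slow path": resolves the entry, then control rejoins the exit.
    static std::string* LoadStringSlow() {
      static std::string interned = "resolved";
      cache = &interned;
      return cache;
    }

    // Hot path mirrors the emitted code: load, test, branch to the slow path.
    std::string* LoadString() {
      std::string* out = cache;  // GenerateGcRootFieldLoad
      if (out == nullptr) {      // __ testl(out, out); __ j(kEqual, ...)
        out = LoadStringSlow();  // slow_path->GetEntryLabel()
      }
      return out;                // slow path jumps back to the exit label
    }

    int main() {
      assert(*LoadString() == "resolved");  // first call takes the slow path
      assert(*LoadString() == "resolved");  // later calls stay on the hot path
      return 0;
    }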
@@ -6363,7 +6333,7 @@
void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -6372,7 +6342,7 @@
}
void LocationsBuilderX86::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -6380,8 +6350,8 @@
}
void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6433,7 +6403,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6610,8 +6581,8 @@
__ cmpl(out, Address(ESP, cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6642,8 +6613,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6691,7 +6662,8 @@
IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch)
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6734,8 +6706,8 @@
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
NearLabel done;
@@ -6932,8 +6904,8 @@
}
void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6956,7 +6928,7 @@
void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -7178,7 +7150,7 @@
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -7308,10 +7280,10 @@
SlowPathCode* slow_path;
if (always_update_field) {
DCHECK(temp != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -7344,7 +7316,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -7380,7 +7352,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -7400,7 +7372,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -7467,7 +7439,7 @@
void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
// Constant area pointer.
@@ -7522,7 +7494,7 @@
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -7546,7 +7518,7 @@
void LocationsBuilderX86::VisitX86LoadFromConstantTable(
HX86LoadFromConstantTable* insn) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant()));
@@ -7706,28 +7678,31 @@
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
AssemblerFixup* fixup =
- new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddDouble(v));
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralFloatAddress(float v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddFloat(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddFloat(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt32Address(int32_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt32(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt32(v));
return Address(reg, kDummy32BitOffset, fixup);
}
Address CodeGeneratorX86::LiteralInt64Address(int64_t v,
HX86ComputeBaseMethodAddress* method_base,
Register reg) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, method_base, __ AddInt64(v));
+ AssemblerFixup* fixup =
+ new (GetGraph()->GetAllocator()) RIPFixup(*this, method_base, __ AddInt64(v));
return Address(reg, kDummy32BitOffset, fixup);
}
@@ -7777,7 +7752,7 @@
Register value) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
@@ -7803,13 +7778,13 @@
// TODO: Consider pairs in the parallel move resolver, then this could be nicely merged
// with the else branch.
if (type == DataType::Type::kInt64) {
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc.ToLow(), target.ToLow(), DataType::Type::kInt32, nullptr);
parallel_move.AddMove(return_loc.ToHigh(), target.ToHigh(), DataType::Type::kInt32, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
} else {
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, target, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
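
The x86 hunks above are a mechanical rename: every placement-new site that used to draw from `GetGraph()->GetArena()` now draws from `GetGraph()->GetAllocator()`. The pattern relies on an arena-style allocator exposing a placement `operator new`, so slow paths, fixups, and `LocationSummary` objects are carved out of pass-lifetime memory and never individually deleted. Below is a minimal sketch of that idiom; `ToyArenaAllocator` and `SlowPathStub` are hypothetical stand-ins, not ART's real classes:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy bump-pointer arena: memory is handed out sequentially and reclaimed
// wholesale when the arena is destroyed; nothing is freed individually.
class ToyArenaAllocator {
 public:
  explicit ToyArenaAllocator(std::size_t capacity) : buffer_(capacity), used_(0) {}

  void* Alloc(std::size_t bytes) {
    // Round up so the next allocation stays suitably aligned.
    std::size_t align = alignof(std::max_align_t);
    bytes = (bytes + align - 1) & ~(align - 1);
    assert(used_ + bytes <= buffer_.size());
    void* p = buffer_.data() + used_;
    used_ += bytes;
    return p;
  }

 private:
  std::vector<uint8_t> buffer_;
  std::size_t used_;
};

// Placement operator new: `new (arena) T(...)` draws storage from the arena.
void* operator new(std::size_t bytes, ToyArenaAllocator& arena) {
  return arena.Alloc(bytes);
}
// Matching placement delete, only invoked if a constructor throws.
void operator delete(void*, ToyArenaAllocator&) noexcept {}

struct SlowPathStub {  // hypothetical stand-in for e.g. a slow-path object
  explicit SlowPathStub(int id) : id(id) {}
  int id;
};

int main() {
  ToyArenaAllocator arena(1024);
  // Mirrors the `new (GetGraph()->GetAllocator()) ...SlowPathX86(...)` sites.
  SlowPathStub* path = new (arena) SlowPathStub(42);
  (void)path;  // No delete: the arena reclaims everything at once.
  return 0;
}
```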
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 42704e9..b6aa110 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -427,7 +427,7 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(
locations->InAt(0),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
@@ -831,7 +831,7 @@
// We're moving two or three locations to locations that could
// overlap, so we need a parallel move resolver.
InvokeRuntimeCallingConvention calling_convention;
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
parallel_move.AddMove(ref_,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
DataType::Type::kReference,
@@ -1230,19 +1230,19 @@
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
- move_resolver_(graph->GetArena(), this),
- assembler_(graph->GetArena()),
+ move_resolver_(graph->GetAllocator(), this),
+ assembler_(graph->GetAllocator()),
isa_features_(isa_features),
constant_area_start_(0),
- boot_image_method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- method_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- boot_image_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- string_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1521,6 +1521,7 @@
DataType::Type type = condition->InputAt(0)->GetType();
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1701,7 +1702,7 @@
}
void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1718,7 +1719,7 @@
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
InvokeRuntimeCallingConvention calling_convention;
RegisterSet caller_saves = RegisterSet::Empty();
@@ -1738,7 +1739,7 @@
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
- LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(flag, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -1766,7 +1767,7 @@
}
void LocationsBuilderX86_64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
if (DataType::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::Any());
@@ -1846,7 +1847,7 @@
}
void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- new (GetGraph()->GetArena()) LocationSummary(info);
+ new (GetGraph()->GetAllocator()) LocationSummary(info);
}
void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo*) {
@@ -1859,7 +1860,7 @@
void LocationsBuilderX86_64::HandleCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
switch (cond->InputAt(0)->GetType()) {
case DataType::Type::kInt64:
@@ -2033,12 +2034,13 @@
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
switch (compare->InputAt(0)->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -2070,9 +2072,10 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
codegen_->GenerateIntCompare(left, right);
break;
@@ -2129,7 +2132,7 @@
void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2139,7 +2142,7 @@
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2149,7 +2152,7 @@
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2159,7 +2162,7 @@
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2169,7 +2172,7 @@
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
@@ -2205,14 +2208,15 @@
void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
case DataType::Type::kInt64:
locations->SetInAt(0, Location::RegisterLocation(RAX));
break;
@@ -2230,12 +2234,13 @@
void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
if (kIsDebugBuild) {
switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
case DataType::Type::kInt64:
DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
break;
@@ -2255,12 +2260,13 @@
Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Type type) const {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kReference:
case DataType::Type::kInt64:
return Location::RegisterLocation(RAX);
@@ -2281,12 +2287,13 @@
Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type type) {
switch (type) {
+ case DataType::Type::kReference:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kReference: {
+ case DataType::Type::kInt32: {
uint32_t index = gp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
@@ -2467,7 +2474,7 @@
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
switch (neg->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
@@ -2533,71 +2540,35 @@
void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(conversion, LocationSummary::kNoCall);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
-
- // The Java language does not allow treating boolean as an integral type but
- // our bit representation makes it safe.
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
switch (result_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to byte is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
+ DCHECK(DataType::IsIntegralType(input_type)) << input_type;
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-int' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
break;
@@ -2611,12 +2582,11 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
// TODO: We would benefit from a (to-be-implemented)
// Location::RegisterOrStackSlot requirement for this input.
locations->SetInAt(0, Location::RequiresRegister());
@@ -2624,13 +2594,11 @@
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-long' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-long' instruction.
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
break;
@@ -2641,47 +2609,24 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to char is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-float' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-float' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -2695,24 +2640,21 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-double' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-double' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -2735,18 +2677,40 @@
Location in = locations->InAt(0);
DataType::Type result_type = conversion->GetResultType();
DataType::Type input_type = conversion->GetInputType();
- DCHECK_NE(result_type, input_type);
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " -> " << result_type;
switch (result_type) {
- case DataType::Type::kInt8:
+ case DataType::Type::kUint8:
switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to byte is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ if (in.IsRegister()) {
+ __ movzxb(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
+ } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
+ __ movzxb(out.AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ __ movl(out.AsRegister<CpuRegister>(),
+ Immediate(static_cast<uint8_t>(Int64FromConstant(in.GetConstant()))));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case DataType::Type::kInt8:
+ switch (input_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kUint16:
- // Processing a Dex `int-to-byte' instruction.
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
if (in.IsRegister()) {
__ movsxb(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
@@ -2764,16 +2728,34 @@
}
break;
+ case DataType::Type::kUint16:
+ switch (input_type) {
+ case DataType::Type::kInt8:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ if (in.IsRegister()) {
+ __ movzxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
+ } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
+ __ movzxw(out.AsRegister<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ __ movl(out.AsRegister<CpuRegister>(),
+ Immediate(static_cast<uint16_t>(Int64FromConstant(in.GetConstant()))));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case DataType::Type::kInt16:
switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to short is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt32:
case DataType::Type::kUint16:
- // Processing a Dex `int-to-short' instruction.
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
if (in.IsRegister()) {
__ movsxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
@@ -2794,7 +2776,6 @@
case DataType::Type::kInt32:
switch (input_type) {
case DataType::Type::kInt64:
- // Processing a Dex `long-to-int' instruction.
if (in.IsRegister()) {
__ movl(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
} else if (in.IsDoubleStackSlot()) {
@@ -2809,7 +2790,6 @@
break;
case DataType::Type::kFloat32: {
- // Processing a Dex `float-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
NearLabel done, nan;
@@ -2831,7 +2811,6 @@
}
case DataType::Type::kFloat64: {
- // Processing a Dex `double-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
NearLabel done, nan;
@@ -2862,18 +2841,16 @@
switch (input_type) {
DCHECK(out.IsRegister());
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-long' instruction.
DCHECK(in.IsRegister());
__ movsxd(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
break;
case DataType::Type::kFloat32: {
- // Processing a Dex `float-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
NearLabel done, nan;
@@ -2895,7 +2872,6 @@
}
case DataType::Type::kFloat64: {
- // Processing a Dex `double-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
NearLabel done, nan;
@@ -2922,42 +2898,14 @@
}
break;
- case DataType::Type::kUint16:
- switch (input_type) {
- case DataType::Type::kInt64:
- // Type conversion from long to char is a result of code transformations.
- case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
- case DataType::Type::kInt8:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- // Processing a Dex `int-to-char' instruction.
- if (in.IsRegister()) {
- __ movzxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
- } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
- __ movzxw(out.AsRegister<CpuRegister>(),
- Address(CpuRegister(RSP), in.GetStackIndex()));
- } else {
- __ movl(out.AsRegister<CpuRegister>(),
- Immediate(static_cast<uint16_t>(Int64FromConstant(in.GetConstant()))));
- }
- break;
-
- default:
- LOG(FATAL) << "Unexpected type conversion from " << input_type
- << " to " << result_type;
- }
- break;
-
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-float' instruction.
if (in.IsRegister()) {
__ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
} else if (in.IsConstant()) {
@@ -2971,7 +2919,6 @@
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-float' instruction.
if (in.IsRegister()) {
__ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
} else if (in.IsConstant()) {
@@ -2985,7 +2932,6 @@
break;
case DataType::Type::kFloat64:
- // Processing a Dex `double-to-float' instruction.
if (in.IsFpuRegister()) {
__ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
} else if (in.IsConstant()) {
@@ -3007,12 +2953,11 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
- // Boolean input is a result of code transformations.
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
case DataType::Type::kInt16:
case DataType::Type::kInt32:
- case DataType::Type::kUint16:
- // Processing a Dex `int-to-double' instruction.
if (in.IsRegister()) {
__ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
} else if (in.IsConstant()) {
@@ -3026,7 +2971,6 @@
break;
case DataType::Type::kInt64:
- // Processing a Dex `long-to-double' instruction.
if (in.IsRegister()) {
__ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
} else if (in.IsConstant()) {
@@ -3040,7 +2984,6 @@
break;
case DataType::Type::kFloat32:
- // Processing a Dex `float-to-double' instruction.
if (in.IsFpuRegister()) {
__ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
} else if (in.IsConstant()) {
@@ -3067,7 +3010,7 @@
void LocationsBuilderX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(add, LocationSummary::kNoCall);
switch (add->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3191,7 +3134,7 @@
void LocationsBuilderX86_64::VisitSub(HSub* sub) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(sub, LocationSummary::kNoCall);
switch (sub->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3282,7 +3225,7 @@
void LocationsBuilderX86_64::VisitMul(HMul* mul) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
switch (mul->GetResultType()) {
case DataType::Type::kInt32: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -3706,7 +3649,7 @@
}
} else {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64(
+ new (GetGraph()->GetAllocator()) DivRemMinusOneSlowPathX86_64(
instruction, out.AsRegister(), type, is_div);
codegen_->AddSlowPath(slow_path);
@@ -3735,7 +3678,7 @@
void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
@@ -3818,7 +3761,7 @@
void LocationsBuilderX86_64::VisitRem(HRem* rem) {
DataType::Type type = rem->GetResultType();
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(rem, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(rem, LocationSummary::kNoCall);
switch (type) {
case DataType::Type::kInt32:
@@ -3875,7 +3818,7 @@
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) DivZeroCheckSlowPathX86_64(instruction);
+ new (GetGraph()->GetAllocator()) DivZeroCheckSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3883,6 +3826,7 @@
switch (instruction->GetType()) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -3925,7 +3869,7 @@
DCHECK(op->IsShl() || op->IsShr() || op->IsUShr());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
case DataType::Type::kInt32:
@@ -4001,7 +3945,7 @@
void LocationsBuilderX86_64::VisitRor(HRor* ror) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(ror, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(ror, LocationSummary::kNoCall);
switch (ror->GetResultType()) {
case DataType::Type::kInt32:
@@ -4073,8 +4017,8 @@
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
if (instruction->IsStringAlloc()) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
@@ -4102,8 +4046,8 @@
}
void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetOut(Location::RegisterLocation(RAX));
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -4122,7 +4066,7 @@
void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
@@ -4139,7 +4083,7 @@
void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}
@@ -4150,7 +4094,7 @@
void LocationsBuilderX86_64::VisitClassTableGet(HClassTableGet* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4175,7 +4119,7 @@
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4201,7 +4145,7 @@
void LocationsBuilderX86_64::VisitBooleanNot(HBooleanNot* bool_not) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(bool_not, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(bool_not, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4216,7 +4160,7 @@
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
locations->SetInAt(i, Location::Any());
}
@@ -4257,10 +4201,10 @@
bool object_field_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_field_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_field_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4290,7 +4234,8 @@
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
switch (field_type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
__ movzxb(out.AsRegister<CpuRegister>(), Address(base, offset));
break;
}
@@ -4300,13 +4245,13 @@
break;
}
- case DataType::Type::kInt16: {
- __ movsxw(out.AsRegister<CpuRegister>(), Address(base, offset));
+ case DataType::Type::kUint16: {
+ __ movzxw(out.AsRegister<CpuRegister>(), Address(base, offset));
break;
}
- case DataType::Type::kUint16: {
- __ movzxw(out.AsRegister<CpuRegister>(), Address(base, offset));
+ case DataType::Type::kInt16: {
+ __ movsxw(out.AsRegister<CpuRegister>(), Address(base, offset));
break;
}
@@ -4381,7 +4326,7 @@
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DataType::Type field_type = field_info.GetFieldType();
bool is_volatile = field_info.IsVolatile();
bool needs_write_barrier =
@@ -4433,6 +4378,7 @@
switch (field_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
if (value.IsConstant()) {
__ movb(Address(base, offset),
@@ -4443,8 +4389,8 @@
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
if (value.IsConstant()) {
__ movw(Address(base, offset),
Immediate(CodeGenerator::GetInt16ValueOf(value.GetConstant())));
@@ -4656,7 +4602,7 @@
}
void CodeGeneratorX86_64::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) NullCheckSlowPathX86_64(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4683,10 +4629,10 @@
bool object_array_get_with_read_barrier =
kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction,
- object_array_get_with_read_barrier ?
- LocationSummary::kCallOnSlowPath :
- LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction,
+ object_array_get_with_read_barrier
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -4714,7 +4660,8 @@
DataType::Type type = instruction->GetType();
switch (type) {
- case DataType::Type::kBool: {
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
__ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
@@ -4726,12 +4673,6 @@
break;
}
- case DataType::Type::kInt16: {
- CpuRegister out = out_loc.AsRegister<CpuRegister>();
- __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
- break;
- }
-
case DataType::Type::kUint16: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
@@ -4754,6 +4695,12 @@
break;
}
+ case DataType::Type::kInt16: {
+ CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
+ break;
+ }
+
case DataType::Type::kInt32: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
__ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -4828,7 +4775,7 @@
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
may_need_runtime_call_for_type_check ?
LocationSummary::kCallOnSlowPath :
@@ -4865,6 +4812,7 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset);
@@ -4877,8 +4825,8 @@
break;
}
- case DataType::Type::kInt16:
- case DataType::Type::kUint16: {
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
@@ -4916,7 +4864,7 @@
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
if (may_need_runtime_call_for_type_check) {
- slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
+ slow_path = new (GetGraph()->GetAllocator()) ArraySetSlowPathX86_64(instruction);
codegen_->AddSlowPath(slow_path);
if (instruction->GetValueCanBeNull()) {
__ testl(register_value, register_value);
@@ -5054,7 +5002,7 @@
void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
if (!instruction->IsEmittedAtUseSite()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -5095,7 +5043,7 @@
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) BoundsCheckSlowPathX86_64(instruction);
if (length_loc.IsConstant()) {
int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5181,8 +5129,8 @@
}
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
@@ -5209,7 +5157,7 @@
SuspendCheckSlowPathX86_64* slow_path =
down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
if (slow_path == nullptr) {
- slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
+ slow_path = new (GetGraph()->GetAllocator()) SuspendCheckSlowPathX86_64(instruction, successor);
instruction->SetSlowPath(slow_path);
codegen_->AddSlowPath(slow_path);
if (successor != nullptr) {
@@ -5491,7 +5439,7 @@
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -5607,7 +5555,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5624,7 +5572,7 @@
void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -5633,7 +5581,7 @@
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadClassSlowPathX86_64(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5660,7 +5608,7 @@
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(RAX));
} else {
@@ -5723,7 +5671,7 @@
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
__ testl(out, out);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -5759,7 +5707,7 @@
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
}
@@ -5768,7 +5716,7 @@
}
void LocationsBuilderX86_64::VisitClearException(HClearException* clear) {
- new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
}
void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
@@ -5776,8 +5724,8 @@
}
void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -5827,7 +5775,8 @@
break;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -6012,8 +5961,8 @@
__ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6044,8 +5993,8 @@
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
- slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- /* is_fatal */ false);
+ slow_path = new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ /* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6093,7 +6042,8 @@
LocationSummary::CallKind call_kind = is_fatal_slow_path
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations =
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
// Require a register for the interface check since there is a loop that compares the class to
@@ -6138,8 +6088,8 @@
bool is_type_check_slow_path_fatal =
IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock());
SlowPathCode* type_check_slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction,
- is_type_check_slow_path_fatal);
+ new (GetGraph()->GetAllocator()) TypeCheckSlowPathX86_64(instruction,
+ is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6337,8 +6287,8 @@
}
void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
+ instruction, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}
@@ -6360,7 +6310,7 @@
void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == DataType::Type::kInt32
|| instruction->GetResultType() == DataType::Type::kInt64);
locations->SetInAt(0, Location::RequiresRegister());
@@ -6564,7 +6514,7 @@
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, root, /* unpoison_ref_before_marking */ false);
codegen_->AddSlowPath(slow_path);
@@ -6696,10 +6646,10 @@
if (always_update_field) {
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
} else {
- slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathX86_64(
+ slow_path = new (GetGraph()->GetAllocator()) ReadBarrierMarkSlowPathX86_64(
instruction, ref, /* unpoison_ref_before_marking */ true);
}
AddSlowPath(slow_path);
@@ -6732,7 +6682,7 @@
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCode* slow_path = new (GetGraph()->GetAllocator())
ReadBarrierForHeapReferenceSlowPathX86_64(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6768,7 +6718,7 @@
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
SlowPathCode* slow_path =
- new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
+ new (GetGraph()->GetAllocator()) ReadBarrierForRootSlowPathX86_64(instruction, out, root);
AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
@@ -6788,7 +6738,7 @@
// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -7076,22 +7026,22 @@
}
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralFloatAddress(float v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddFloat(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt32Address(int32_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt32(v));
return Address::RIP(fixup);
}
Address CodeGeneratorX86_64::LiteralInt64Address(int64_t v) {
- AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v));
+ AssemblerFixup* fixup = new (GetGraph()->GetAllocator()) RIPFixup(*this, __ AddInt64(v));
return Address::RIP(fixup);
}
@@ -7110,7 +7060,7 @@
}
// Let the parallel move resolver take care of all of this.
- HParallelMove parallel_move(GetGraph()->GetArena());
+ HParallelMove parallel_move(GetGraph()->GetAllocator());
parallel_move.AddMove(return_loc, trg, type, nullptr);
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
@@ -7118,7 +7068,7 @@
Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
// Create a fixup to be used to create and address the jump table.
JumpTableRIPFixup* table_fixup =
- new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+ new (GetGraph()->GetAllocator()) JumpTableRIPFixup(*this, switch_instr);
// We have to populate the jump tables.
fixups_to_jump_tables_.push_back(table_fixup);
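
Besides the allocator rename, the x86-64 hunks thread the new `DataType::Type::kUint8` through the type switches and pair it with zero-extending loads (`movzxb`, `movzxw`), while the signed narrow types keep the sign-extending forms (`movsxb`, `movsxw`). A standalone sketch of why that distinction matters when widening a narrow value, independent of ART's codegen:

```cpp
#include <cstdint>
#include <iostream>

// Widening a narrow value: an unsigned source must be zero-extended (movzx),
// a signed source must be sign-extended (movsx), or the numeric result changes.
int main() {
  uint8_t raw = 0xF0;  // bit pattern 1111'0000

  int32_t widened_unsigned = static_cast<uint8_t>(raw);  // movzxb-style: 240
  int32_t widened_signed = static_cast<int8_t>(raw);     // movsxb-style: -16

  std::cout << widened_unsigned << "\n";  // prints 240
  std::cout << widened_signed << "\n";    // prints -16
  return 0;
}
```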
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index b558eb1..d8ebac9 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -16,6 +16,10 @@
#include "code_sinking.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "common_dominator.h"
#include "nodes.h"
@@ -115,7 +119,7 @@
static void AddInstruction(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
// Add to the work list if the instruction is not in the list of blocks
// to discard, hasn't been already processed and is of interest.
if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
@@ -128,7 +132,7 @@
static void AddInputs(HInstruction* instruction,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstruction* input : instruction->GetInputs()) {
AddInstruction(input, processed_instructions, discard_blocks, worklist);
}
@@ -137,7 +141,7 @@
static void AddInputs(HBasicBlock* block,
const ArenaBitVector& processed_instructions,
const ArenaBitVector& discard_blocks,
- ArenaVector<HInstruction*>* worklist) {
+ ScopedArenaVector<HInstruction*>* worklist) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
}
@@ -242,17 +246,19 @@
void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
- // Local allocator to discard data structures created below at the end of
- // this optimization.
- ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());
+ // Local allocator to discard data structures created below at the end of this optimization.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
size_t number_of_instructions = graph_->GetCurrentInstructionId();
- ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
+ ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ processed_instructions.ClearAllBits();
ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
&allocator, number_of_instructions, /* expandable */ false);
- ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
+ instructions_that_can_move.ClearAllBits();
+ ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
// Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
// TODO(ngeoffray): Getting the full set of post-dominated should be done by
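
The code_sinking.cc hunk swaps the pass-local `ArenaAllocator` for a `ScopedArenaAllocator` drawn from `graph_->GetArenaStack()`, and the newly added `ClearAllBits()` calls suggest the scoped allocator can hand back recycled, non-zeroed memory, so bit vectors must be reset explicitly. A toy model of that stack-scoped reuse, with hypothetical `ToyArenaStack`/`ToyScopedArena` names standing in for ART's real classes:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Shared backing storage: allocations are stacked on one buffer.
struct ToyArenaStack {
  std::vector<uint8_t> buffer = std::vector<uint8_t>(1024, 0xAB);  // stale bytes
  std::size_t top = 0;
};

// Scoped arena: remembers the stack mark on entry and pops everything it
// allocated when the scope ends. Recycled memory is NOT cleared, which is
// why callers must reset containers explicitly (the ClearAllBits() calls).
class ToyScopedArena {
 public:
  explicit ToyScopedArena(ToyArenaStack* stack) : stack_(stack), mark_(stack->top) {}
  ~ToyScopedArena() { stack_->top = mark_; }  // wholesale pop on scope exit

  uint8_t* Alloc(std::size_t bytes) {
    assert(stack_->top + bytes <= stack_->buffer.size());
    uint8_t* p = stack_->buffer.data() + stack_->top;
    stack_->top += bytes;
    return p;  // deliberately not zeroed
  }

 private:
  ToyArenaStack* stack_;
  std::size_t mark_;
};

int main() {
  ToyArenaStack stack;
  {
    ToyScopedArena arena(&stack);
    uint8_t* bits = arena.Alloc(16);
    std::memset(bits, 0xFF, 16);  // first pass dirties the memory
  }
  {
    ToyScopedArena arena(&stack);
    uint8_t* bits = arena.Alloc(16);
    std::cout << static_cast<int>(bits[0]) << "\n";  // 255: stale, not zero
    std::memset(bits, 0, 16);  // the analogue of ClearAllBits()
  }
  return 0;
}
```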
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 896fcfa..e35c7c7 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -72,34 +72,37 @@
return v;
}
-static void TestCode(const uint16_t* data,
- bool has_result = false,
- int32_t expected = 0) {
+class CodegenTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0);
+ void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected);
+ void TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config);
+};
+
+void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-static void TestCodeLong(const uint16_t* data,
- bool has_result,
- int64_t expected) {
+void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateCFG(&arena, data, DataType::Type::kInt64);
+ ResetPoolAndAllocator();
+ HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
}
}
-class CodegenTest : public CommonCompilerTest {};
-
TEST_F(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
@@ -412,28 +415,25 @@
TEST_F(CodegenTest, NonMaterializedCondition) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
-
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
HIntConstant* constant0 = graph->GetIntConstant(0);
HIntConstant* constant1 = graph->GetIntConstant(1);
- HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ HEqual* equal = new (GetAllocator()) HEqual(constant0, constant0);
first_block->AddInstruction(equal);
- first_block->AddInstruction(new (&allocator) HIf(equal));
+ first_block->AddInstruction(new (GetAllocator()) HIf(equal));
- HBasicBlock* then_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_block = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->SetExitBlock(exit_block);
graph->AddBlock(then_block);
@@ -444,9 +444,9 @@
then_block->AddSuccessor(exit_block);
else_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
- then_block->AddInstruction(new (&allocator) HReturn(constant0));
- else_block->AddInstruction(new (&allocator) HReturn(constant1));
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
+ then_block->AddInstruction(new (GetAllocator()) HReturn(constant0));
+ else_block->AddInstruction(new (GetAllocator()) HReturn(constant1));
ASSERT_FALSE(equal->IsEmittedAtUseSite());
graph->BuildDominatorTree();
@@ -455,7 +455,7 @@
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move = new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -475,19 +475,17 @@
int rhs[] = {2, 1, 2, -1, 0xabc};
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
+ HBasicBlock* code_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(code_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(code_block);
code_block->AddSuccessor(exit_block);
@@ -503,7 +501,8 @@
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -523,24 +522,22 @@
for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
- HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_true_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_true_block);
- HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* if_false_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_false_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->SetEntryBlock(entry_block);
entry_block->AddSuccessor(if_block);
@@ -571,7 +568,8 @@
graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ HParallelMove* move =
+ new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
@@ -599,27 +597,25 @@
}
// Helper method.
-static void TestComparison(IfCondition condition,
- int64_t i,
- int64_t j,
- DataType::Type type,
- const CodegenTargetConfig target_config) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+void CodegenTest::TestComparison(IfCondition condition,
+ int64_t i,
+ int64_t j,
+ DataType::Type type,
+ const CodegenTargetConfig target_config) {
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
@@ -641,48 +637,48 @@
const uint64_t y = j;
switch (condition) {
case kCondEQ:
- comparison = new (&allocator) HEqual(op1, op2);
+ comparison = new (GetAllocator()) HEqual(op1, op2);
expected_result = (i == j);
break;
case kCondNE:
- comparison = new (&allocator) HNotEqual(op1, op2);
+ comparison = new (GetAllocator()) HNotEqual(op1, op2);
expected_result = (i != j);
break;
case kCondLT:
- comparison = new (&allocator) HLessThan(op1, op2);
+ comparison = new (GetAllocator()) HLessThan(op1, op2);
expected_result = (i < j);
break;
case kCondLE:
- comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HLessThanOrEqual(op1, op2);
expected_result = (i <= j);
break;
case kCondGT:
- comparison = new (&allocator) HGreaterThan(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThan(op1, op2);
expected_result = (i > j);
break;
case kCondGE:
- comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HGreaterThanOrEqual(op1, op2);
expected_result = (i >= j);
break;
case kCondB:
- comparison = new (&allocator) HBelow(op1, op2);
+ comparison = new (GetAllocator()) HBelow(op1, op2);
expected_result = (x < y);
break;
case kCondBE:
- comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HBelowOrEqual(op1, op2);
expected_result = (x <= y);
break;
case kCondA:
- comparison = new (&allocator) HAbove(op1, op2);
+ comparison = new (GetAllocator()) HAbove(op1, op2);
expected_result = (x > y);
break;
case kCondAE:
- comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ comparison = new (GetAllocator()) HAboveOrEqual(op1, op2);
expected_result = (x >= y);
break;
}
block->AddInstruction(comparison);
- block->AddInstruction(new (&allocator) HReturn(comparison));
+ block->AddInstruction(new (GetAllocator()) HReturn(comparison));
graph->BuildDominatorTree();
RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
@@ -718,9 +714,7 @@
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
std::unique_ptr<const ArmInstructionSetFeatures> features(
ArmInstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -729,7 +723,7 @@
// int mem2) which was faulty (before the fix). So previously GPR and FP scratch registers were
// used as temps; however GPR scratch register is required for big stack offsets which don't fit
// LDR encoding. So the following code is a regression test for that situation.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), DataType::Type::kInt32, nullptr);
move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), DataType::Type::kInt32, nullptr);
codegen.GetMoveResolver()->EmitNativeCode(move);
@@ -744,9 +738,7 @@
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
@@ -777,7 +769,7 @@
// The solution used so far is to use a floating-point temp register
// (D31) in step #2, so that IP1 is available for step #3.
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::DoubleStackSlot(0),
Location::DoubleStackSlot(257),
DataType::Type::kFloat64,
@@ -796,16 +788,14 @@
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
std::unique_ptr<const Arm64InstructionSetFeatures> features(
Arm64InstructionSetFeatures::FromCppDefines());
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
codegen.Initialize();
graph->SetHasSIMD(true);
for (int i = 0; i < 2; i++) {
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ HParallelMove* move = new (graph->GetAllocator()) HParallelMove(graph->GetAllocator());
move->AddMove(Location::SIMDStackSlot(0),
Location::SIMDStackSlot(257),
DataType::Type::kFloat64,
@@ -841,33 +831,31 @@
return;
}
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// To simplify matters, don't create PC-relative HLoadClass or HLoadString.
// Instead, generate HMipsComputeBaseMethodAddress directly.
- HMipsComputeBaseMethodAddress* base = new (&allocator) HMipsComputeBaseMethodAddress();
+ HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress();
block->AddInstruction(base);
// HMipsComputeBaseMethodAddress is defined as int, so just make the
// compiled method return it.
- block->AddInstruction(new (&allocator) HReturn(base));
+ block->AddInstruction(new (GetAllocator()) HReturn(base));
graph->BuildDominatorTree();
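The codegen_test.cc rewrite is the template for most of the test changes that follow: free helper functions that each spun up their own ArenaPool and ArenaAllocator become methods on a fixture deriving from OptimizingUnitTest, which owns the pool and exposes CreateGraph(), CreateCFG(), GetAllocator() and ResetPoolAndAllocator(). A condensed before/after sketch (FooTest and its test body are illustrative, not from the change):

    // Before: every helper owned a pool and an allocator.
    static void TestCodeOld(const uint16_t* data) {
      ArenaPool pool;
      ArenaAllocator allocator(&pool);
      HGraph* graph = CreateCFG(&allocator, data);
      // ...
    }

    // After: the fixture owns the allocation machinery.
    class FooTest : public OptimizingUnitTest {
     protected:
      void TestCode(const uint16_t* data) {
        HGraph* graph = CreateCFG(data);  // Uses the fixture's allocator.
        ASSERT_NE(graph, nullptr);
        HInstruction* c = new (GetAllocator()) HIntConstant(4);  // No local ArenaPool needed.
        graph->GetEntryBlock()->InsertInstructionBefore(
            c, graph->GetEntryBlock()->GetLastInstruction());
      }
    };

    TEST_F(FooTest, ReturnVoid) {
      const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
      TestCode(data);
    }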
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index aa4f5da..bcbcc12 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -295,10 +295,15 @@
const std::function<void(HGraph*)>& hook_before_codegen,
bool has_result,
Expected expected) {
- SsaLivenessAnalysis liveness(graph, codegen);
- PrepareForRegisterAllocation(graph).Run();
- liveness.Analyze();
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ {
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness);
+ register_allocator->AllocateRegisters();
+ }
hook_before_codegen(graph);
InternalCodeAllocator allocator;
codegen->Compile(&allocator);
@@ -331,7 +336,7 @@
CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
}
#endif
@@ -340,7 +345,7 @@
CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options);
}
#endif
@@ -349,7 +354,8 @@
CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena()) TestCodeGeneratorX86(graph, *features_x86.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorX86(
+ graph, *features_x86.get(), compiler_options);
}
#endif
@@ -357,7 +363,7 @@
CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
X86_64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
}
#endif
@@ -366,7 +372,7 @@
CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
}
#endif
@@ -375,7 +381,7 @@
CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetArena())
+ return new (graph->GetAllocator())
mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
}
#endif
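Note the brace scope introduced around liveness analysis and register allocation in RunCodeNoCheck: both now allocate from the local ScopedArenaAllocator, so the scope guarantees their memory is returned to the arena stack before hook_before_codegen and codegen->Compile() run. The RegisterAllocator is also held in a std::unique_ptr, so the allocator object itself dies at the closing brace instead of living on in an arena. In outline (condensed from the hunk above):

    {
      ScopedArenaAllocator local_allocator(graph->GetArenaStack());
      SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
      liveness.Analyze();
      std::unique_ptr<RegisterAllocator> register_allocator =
          RegisterAllocator::Create(&local_allocator, codegen, liveness);
      register_allocator->AllocateRegisters();
    }  // Liveness data and the register allocator are gone before codegen starts.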
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index c85a2e3..e1980e0 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -32,11 +32,9 @@
/**
* Fixture class for the constant folding and dce tests.
*/
-class ConstantFoldingTest : public CommonCompilerTest {
+class ConstantFoldingTest : public OptimizingUnitTest {
public:
- ConstantFoldingTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ ConstantFoldingTest() : graph_(nullptr) { }
void TestCode(const uint16_t* data,
const std::string& expected_before,
@@ -44,7 +42,7 @@
const std::string& expected_after_dce,
const std::function<void(HGraph*)>& check_after_cf,
DataType::Type return_type = DataType::Type::kInt32) {
- graph_ = CreateCFG(&allocator_, data, return_type);
+ graph_ = CreateCFG(data, return_type);
TestCodeOnReadyGraph(expected_before,
expected_after_cf,
expected_after_dce,
@@ -88,8 +86,6 @@
EXPECT_EQ(expected_after_dce, actual_after_dce);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
@@ -742,46 +738,46 @@
* in the bytecode, we need to set up the graph explicitly.
*/
TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
- graph_ = CreateGraph(&allocator_);
- HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
+ graph_ = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block);
graph_->SetEntryBlock(entry_block);
- HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(block);
- HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_block);
graph_->SetExitBlock(exit_block);
entry_block->AddSuccessor(block);
block->AddSuccessor(exit_block);
// Make various unsigned comparisons with zero against a parameter.
- HInstruction* parameter = new (&allocator_) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32, true);
entry_block->AddInstruction(parameter);
- entry_block->AddInstruction(new (&allocator_) HGoto());
+ entry_block->AddInstruction(new (GetAllocator()) HGoto());
HInstruction* zero = graph_->GetIntConstant(0);
HInstruction* last;
- block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
- block->AddInstruction(new (&allocator_) HReturn(zero));
+ block->AddInstruction(last = new (GetAllocator()) HAbove(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAbove(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HAboveOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelow(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(zero, parameter));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(last = new (GetAllocator()) HBelowOrEqual(parameter, zero));
+ block->AddInstruction(new (GetAllocator()) HSelect(last, parameter, parameter, 0));
+ block->AddInstruction(new (GetAllocator()) HReturn(zero));
- exit_block->AddInstruction(new (&allocator_) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index ff7ce60..4a66cd2 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -17,6 +17,8 @@
#include "constructor_fence_redundancy_elimination.h"
#include "base/arena_allocator.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
namespace art {
@@ -27,7 +29,7 @@
public:
CFREVisitor(HGraph* graph, OptimizingCompilerStats* stats)
: HGraphVisitor(graph),
- scoped_allocator_(graph->GetArena()->GetArenaPool()),
+ scoped_allocator_(graph->GetArenaStack()),
candidate_fences_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
candidate_fence_targets_(scoped_allocator_.Adapter(kArenaAllocCFRE)),
stats_(stats) {}
@@ -227,9 +229,8 @@
MaybeRecordStat(stats_, MethodCompilationStat::kConstructorFenceRemovedCFRE);
}
- // Phase-local heap memory allocator for CFRE optimizer. Storage obtained
- // through this allocator is immediately released when the CFRE optimizer is done.
- ArenaAllocator scoped_allocator_;
+ // Phase-local heap memory allocator for CFRE optimizer.
+ ScopedArenaAllocator scoped_allocator_;
// Set of constructor fences that we've seen in the current block.
// Each constructor fence acts as a guard for one or more `targets`.
@@ -237,11 +238,11 @@
//
// Fences are in succession order (e.g. fence[i] succeeds fence[i-1]
// within the same basic block).
- ArenaVector<HConstructorFence*> candidate_fences_;
+ ScopedArenaVector<HConstructorFence*> candidate_fences_;
// Stores a set of the fence targets, to allow faster lookup of whether
// a detected publish is a target of one of the candidate fences.
- ArenaHashSet<HInstruction*> candidate_fence_targets_;
+ ScopedArenaHashSet<HInstruction*> candidate_fence_targets_;
// Used to record stats about the optimization.
OptimizingCompilerStats* const stats_;
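The same scoped-allocator idea appears here in member form: the allocator's lifetime is the visitor's, so the fence vector and target set it backs stay valid for the whole pass and disappear with the CFREVisitor. Declaration order is load-bearing in this variant, since C++ constructs members top-down and destroys them bottom-up; a condensed view of the members from the diff:

    class CFREVisitor : public HGraphVisitor {
      // Declared first: constructed before, and destroyed after, the
      // containers below that allocate from it.
      ScopedArenaAllocator scoped_allocator_;
      ScopedArenaVector<HConstructorFence*> candidate_fences_;
      ScopedArenaHashSet<HInstruction*> candidate_fence_targets_;
    };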
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index fbc0c12..e389bad 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -46,17 +46,19 @@
constexpr char DataType::TypeId(DataType::Type type) {
// Type id for visualizer.
+ // Types corresponding to Java types are given a lower-case version of their shorty character.
switch (type) {
- case DataType::Type::kBool: return 'z';
- case DataType::Type::kInt8: return 'b';
- case DataType::Type::kUint16: return 'c';
- case DataType::Type::kInt16: return 's';
- case DataType::Type::kInt32: return 'i';
- case DataType::Type::kInt64: return 'j';
- case DataType::Type::kFloat32: return 'f';
- case DataType::Type::kFloat64: return 'd';
- case DataType::Type::kReference: return 'l';
- case DataType::Type::kVoid: return 'v';
+ case DataType::Type::kBool: return 'z'; // Java boolean (Z).
+ case DataType::Type::kUint8: return 'a'; // The character before Java byte's 'b'.
+ case DataType::Type::kInt8: return 'b'; // Java byte (B).
+ case DataType::Type::kUint16: return 'c'; // Java char (C).
+ case DataType::Type::kInt16: return 's'; // Java short (S).
+ case DataType::Type::kInt32: return 'i'; // Java int (I).
+ case DataType::Type::kInt64: return 'j'; // Java long (J).
+ case DataType::Type::kFloat32: return 'f'; // Java float (F).
+ case DataType::Type::kFloat64: return 'd'; // Java double (D).
+ case DataType::Type::kReference: return 'l'; // Java reference (L).
+ case DataType::Type::kVoid: return 'v'; // Java void (V).
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
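With Uint8 in the enum, TypeId needs a character no Java shorty uses; 'a' is simply the letter before byte's 'b', as the new comment spells out. Since the function is declared constexpr, the mapping should be checkable at compile time; a hypothetical spot check, assuming the definitions above:

    static_assert(DataType::TypeId(DataType::Type::kUint8) == 'a', "Uint8 visualizer id");
    static_assert(DataType::TypeId(DataType::Type::kInt64) == 'j', "Java long shorty");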
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 6890617..3c99a76 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -21,6 +21,7 @@
static const char* kTypeNames[] = {
"Reference",
"Bool",
+ "Uint8",
"Int8",
"Uint16",
"Int16",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 08f9263..3b67efe 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -29,6 +29,7 @@
enum class Type : uint8_t {
kReference = 0,
kBool,
+ kUint8,
kInt8,
kUint16,
kInt16,
@@ -47,6 +48,7 @@
switch (type) {
case Type::kVoid:
case Type::kBool:
+ case Type::kUint8:
case Type::kInt8:
return 0;
case Type::kUint16:
@@ -71,6 +73,7 @@
case Type::kVoid:
return 0;
case Type::kBool:
+ case Type::kUint8:
case Type::kInt8:
return 1;
case Type::kUint16:
@@ -99,6 +102,7 @@
// our bit representation makes it safe.
switch (type) {
case Type::kBool:
+ case Type::kUint8:
case Type::kInt8:
case Type::kUint16:
case Type::kInt16:
@@ -118,10 +122,15 @@
return type == Type::kInt64 || type == Type::kFloat64;
}
+ static bool IsUnsignedType(Type type) {
+ return type == Type::kUint8 || type == Type::kUint16;
+ }
+
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
static Type Kind(Type type) {
switch (type) {
case Type::kBool:
+ case Type::kUint8:
case Type::kInt8:
case Type::kInt16:
case Type::kUint16:
@@ -136,6 +145,8 @@
switch (type) {
case Type::kBool:
return std::numeric_limits<bool>::min();
+ case Type::kUint8:
+ return std::numeric_limits<uint8_t>::min();
case Type::kInt8:
return std::numeric_limits<int8_t>::min();
case Type::kUint16:
@@ -156,6 +167,8 @@
switch (type) {
case Type::kBool:
return std::numeric_limits<bool>::max();
+ case Type::kUint8:
+ return std::numeric_limits<uint8_t>::max();
case Type::kInt8:
return std::numeric_limits<int8_t>::max();
case Type::kUint16:
@@ -172,6 +185,8 @@
return 0;
}
+ static bool IsTypeConversionImplicit(Type input_type, Type result_type);
+
static const char* PrettyDescriptor(Type type);
private:
@@ -179,6 +194,25 @@
};
std::ostream& operator<<(std::ostream& os, DataType::Type data_type);
+// Defined outside DataType to have the operator<< available for DCHECK_NE().
+inline bool DataType::IsTypeConversionImplicit(Type input_type, Type result_type) {
+ DCHECK_NE(DataType::Type::kVoid, result_type);
+ DCHECK_NE(DataType::Type::kVoid, input_type);
+
+ // Invariant: We should never generate a conversion to a Boolean value.
+ DCHECK_NE(DataType::Type::kBool, result_type);
+
+ // Besides conversion to the same type, integral conversions to non-Int64 types
+ // are implicit if the result value range covers the input value range, i.e.
+ // widening conversions that do not need to trim the sign bits.
+ return result_type == input_type ||
+ (result_type != Type::kInt64 &&
+ IsIntegralType(input_type) &&
+ IsIntegralType(result_type) &&
+ MinValueOfIntegralType(input_type) >= MinValueOfIntegralType(result_type) &&
+ MaxValueOfIntegralType(input_type) <= MaxValueOfIntegralType(result_type));
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_DATA_TYPE_H_
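IsTypeConversionImplicit encodes a value-range argument: a conversion can be dropped when every value of the input type is representable unchanged in the result type, with Int64 results excluded, presumably because widening to 64 bits still needs a real sign- or zero-extension instruction. For the new Uint8 type this means Uint8 -> Int16 is implicit ([0, 255] fits inside [-32768, 32767]) while Uint8 -> Int8 is not (128..255 would change value). Illustrative checks, assuming the definitions above:

    DataType::IsTypeConversionImplicit(
        DataType::Type::kInt32, DataType::Type::kInt32);  // true: same type.
    DataType::IsTypeConversionImplicit(
        DataType::Type::kUint8, DataType::Type::kInt16);  // true: [0, 255] fits.
    DataType::IsTypeConversionImplicit(
        DataType::Type::kUint8, DataType::Type::kInt8);   // false: 128..255 do not fit.
    DataType::IsTypeConversionImplicit(
        DataType::Type::kInt32, DataType::Type::kInt64);  // false: Int64 results excluded.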
diff --git a/compiler/optimizing/data_type_test.cc b/compiler/optimizing/data_type_test.cc
index 927291a..3ce683a 100644
--- a/compiler/optimizing/data_type_test.cc
+++ b/compiler/optimizing/data_type_test.cc
@@ -18,6 +18,8 @@
#include "data_type-inl.h"
+#include "base/array_ref.h"
+#include "base/macros.h"
#include "primitive.h"
namespace art {
@@ -57,4 +59,58 @@
#undef CHECK_NAME
}
+TEST(DataType, IsTypeConversionImplicit) {
+ static const DataType::Type kIntegralTypes[] = {
+ DataType::Type::kBool,
+ DataType::Type::kUint8,
+ DataType::Type::kInt8,
+ DataType::Type::kUint16,
+ DataType::Type::kInt16,
+ DataType::Type::kInt32,
+ DataType::Type::kInt64,
+ };
+ const ArrayRef<const DataType::Type> kIntegralInputTypes(kIntegralTypes);
+ // Note: kBool cannot be used as a result type.
+ DCHECK_EQ(kIntegralTypes[0], DataType::Type::kBool);
+ const ArrayRef<const DataType::Type> kIntegralResultTypes = kIntegralInputTypes.SubArray(1u);
+
+ static const bool kImplicitIntegralConversions[][arraysize(kIntegralTypes)] = {
+ // Bool Uint8 Int8 Uint16 Int16 Int32 Int64
+ { /* Bool N/A */ true, true, true, true, true, false },
+ { /* Uint8 N/A */ true, false, true, true, true, false },
+ { /* Int8 N/A */ false, true, false, true, true, false },
+ { /* Uint16 N/A */ false, false, true, false, true, false },
+ { /* Int16 N/A */ false, false, false, true, true, false },
+ { /* Int32 N/A */ false, false, false, false, true, false },
+ { /* Int64 N/A */ false, false, false, false, false, true },
+ };
+ static_assert(arraysize(kIntegralTypes) == arraysize(kImplicitIntegralConversions), "size check");
+
+ for (size_t input_index = 0; input_index != kIntegralInputTypes.size(); ++input_index) {
+ DataType::Type input_type = kIntegralInputTypes[input_index];
+ for (size_t result_index = 1u; result_index != kIntegralResultTypes.size(); ++result_index) {
+ DataType::Type result_type = kIntegralResultTypes[result_index];
+ EXPECT_EQ(kImplicitIntegralConversions[input_index][result_index],
+ DataType::IsTypeConversionImplicit(input_type, result_type))
+ << input_type << " " << result_type;
+ }
+ }
+ for (DataType::Type input_type : kIntegralInputTypes) {
+ EXPECT_FALSE(DataType::IsTypeConversionImplicit(input_type, DataType::Type::kFloat32));
+ EXPECT_FALSE(DataType::IsTypeConversionImplicit(input_type, DataType::Type::kFloat64));
+ }
+ for (DataType::Type result_type : kIntegralResultTypes) {
+ EXPECT_FALSE(DataType::IsTypeConversionImplicit(DataType::Type::kFloat32, result_type));
+ EXPECT_FALSE(DataType::IsTypeConversionImplicit(DataType::Type::kFloat64, result_type));
+ }
+ EXPECT_TRUE(
+ DataType::IsTypeConversionImplicit(DataType::Type::kFloat32, DataType::Type::kFloat32));
+ EXPECT_FALSE(
+ DataType::IsTypeConversionImplicit(DataType::Type::kFloat32, DataType::Type::kFloat64));
+ EXPECT_FALSE(
+ DataType::IsTypeConversionImplicit(DataType::Type::kFloat64, DataType::Type::kFloat32));
+ EXPECT_TRUE(
+ DataType::IsTypeConversionImplicit(DataType::Type::kFloat64, DataType::Type::kFloat64));
+}
+
} // namespace art
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9b094e9..5117e07 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -24,7 +24,7 @@
namespace art {
static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
- ArenaVector<HBasicBlock*> worklist(graph->GetArena()->Adapter(kArenaAllocDCE));
+ ArenaVector<HBasicBlock*> worklist(graph->GetAllocator()->Adapter(kArenaAllocDCE));
constexpr size_t kDefaultWorlistSize = 8;
worklist.reserve(kDefaultWorlistSize);
visited->SetBit(graph->GetEntryBlock()->GetBlockId());
@@ -306,7 +306,7 @@
bool HDeadCodeElimination::RemoveDeadBlocks() {
// Classify blocks as reachable/unreachable.
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false, kArenaAllocDCE);
MarkReachableBlocks(graph_, &live_blocks);
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 96fa540..929572e 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -27,14 +27,17 @@
namespace art {
-class DeadCodeEliminationTest : public CommonCompilerTest {};
+class DeadCodeEliminationTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after);
+};
-static void TestCode(const uint16_t* data,
- const std::string& expected_before,
- const std::string& expected_after) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void DeadCodeEliminationTest::TestCode(const uint16_t* data,
+ const std::string& expected_before,
+ const std::string& expected_after) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
StringPrettyPrinter printer_before(graph);
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 50c677a..6bf3a59 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -24,12 +24,13 @@
namespace art {
-class OptimizerTest : public CommonCompilerTest {};
+class OptimizerTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length);
+};
-static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
+ HGraph* graph = CreateCFG(data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == kInvalidBlockId) {
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index 0e9c81d..36e932c 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -25,16 +25,15 @@
namespace art {
-class EmitSwapMipsTest : public ::testing::Test {
+class EmitSwapMipsTest : public OptimizingUnitTest {
public:
void SetUp() OVERRIDE {
- allocator_.reset(new ArenaAllocator(&pool_));
- graph_ = CreateGraph(allocator_.get());
+ graph_ = CreateGraph();
isa_features_ = MipsInstructionSetFeatures::FromCppDefines();
- codegen_ = new (graph_->GetArena()) mips::CodeGeneratorMIPS(graph_,
- *isa_features_.get(),
- CompilerOptions());
- moves_ = new (allocator_.get()) HParallelMove(allocator_.get());
+ codegen_ = new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_,
+ *isa_features_.get(),
+ CompilerOptions());
+ moves_ = new (GetAllocator()) HParallelMove(GetAllocator());
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
GetAssemblerCmdName(),
@@ -47,8 +46,9 @@
}
void TearDown() OVERRIDE {
- allocator_.reset();
test_helper_.reset();
+ isa_features_.reset();
+ ResetPoolAndAllocator();
}
// Get the typically used name for this architecture.
@@ -104,12 +104,10 @@
}
protected:
- ArenaPool pool_;
HGraph* graph_;
HParallelMove* moves_;
mips::CodeGeneratorMIPS* codegen_;
mips::MipsAssembler* assembler_;
- std::unique_ptr<ArenaAllocator> allocator_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
std::unique_ptr<const MipsInstructionSetFeatures> isa_features_;
};
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index bbd28f5..c917528 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -27,7 +27,7 @@
namespace art {
-class FindLoopsTest : public CommonCompilerTest {};
+class FindLoopsTest : public OptimizingUnitTest {};
TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
@@ -35,9 +35,7 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -48,9 +46,7 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -64,9 +60,7 @@
Instruction::GOTO | 0x100,
Instruction::RETURN);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -81,9 +75,7 @@
Instruction::CONST_4 | 5 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -96,9 +88,7 @@
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -142,9 +132,7 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN_VOID);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -170,9 +158,7 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -195,9 +181,7 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -221,9 +205,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -247,9 +229,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -272,9 +252,7 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop
@@ -303,9 +281,7 @@
Instruction::GOTO | 0xFE00, // second loop
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
@@ -333,9 +309,7 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader());
HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation();
ASSERT_EQ(1u, info->NumberOfBackEdges());
@@ -349,9 +323,7 @@
Instruction::IF_EQ, 0xFFFF,
Instruction::RETURN | 0 << 8);
- ArenaPool arena;
- ArenaAllocator allocator(&arena);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 3060c80..6af7b42 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -28,14 +28,14 @@
public:
explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ")
: HGraphDelegateVisitor(graph),
- errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
+ errors_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(graph->GetArena(),
+ seen_ids_(graph->GetAllocator(),
graph->GetCurrentInstructionId(),
false,
kArenaAllocGraphChecker),
- blocks_storage_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
- visited_storage_(graph->GetArena(), 0u, true, kArenaAllocGraphChecker) {}
+ blocks_storage_(graph->GetAllocator()->Adapter(kArenaAllocGraphChecker)),
+ visited_storage_(graph->GetAllocator(), 0u, true, kArenaAllocGraphChecker) {}
// Check the whole graph (in reverse post-order).
void Run() {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 2b82319..9ca3e49 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -19,6 +19,12 @@
namespace art {
+class GraphCheckerTest : public OptimizingUnitTest {
+ protected:
+ HGraph* CreateSimpleCFG();
+ void TestCode(const uint16_t* data);
+};
+
/**
* Create a simple control-flow graph composed of two blocks:
*
@@ -27,14 +33,14 @@
* BasicBlock 1, pred: 0
* 1: Exit
*/
-HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
- entry_block->AddInstruction(new (allocator) HReturnVoid());
+HGraph* GraphCheckerTest::CreateSimpleCFG() {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
+ entry_block->AddInstruction(new (GetAllocator()) HReturnVoid());
graph->AddBlock(entry_block);
graph->SetEntryBlock(entry_block);
- HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
- exit_block->AddInstruction(new (allocator) HExit());
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
entry_block->AddSuccessor(exit_block);
@@ -42,10 +48,8 @@
return graph;
}
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void GraphCheckerTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
GraphChecker graph_checker(graph);
@@ -53,8 +57,6 @@
ASSERT_TRUE(graph_checker.IsValid());
}
-class GraphCheckerTest : public CommonCompilerTest {};
-
TEST_F(GraphCheckerTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
@@ -93,10 +95,7 @@
// Test case with an invalid graph containing inconsistent
// predecessor/successor arcs in CFG.
TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
@@ -111,10 +110,7 @@
// Test case with an invalid graph containing a non-branch last
// instruction in a block.
TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateSimpleCFG(&allocator);
+ HGraph* graph = CreateSimpleCFG();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 28ee3a5..29af808 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -24,43 +24,52 @@
namespace art {
-static HBasicBlock* createIfBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* if_block = new (allocator) HBasicBlock(graph);
+class GraphTest : public OptimizingUnitTest {
+ protected:
+ HBasicBlock* CreateIfBlock(HGraph* graph);
+ HBasicBlock* CreateGotoBlock(HGraph* graph);
+ HBasicBlock* CreateEntryBlock(HGraph* graph);
+ HBasicBlock* CreateReturnBlock(HGraph* graph);
+ HBasicBlock* CreateExitBlock(HGraph* graph);
+};
+
+HBasicBlock* GraphTest::CreateIfBlock(HGraph* graph) {
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(if_block);
HInstruction* instr = graph->GetIntConstant(4);
- HInstruction* equal = new (allocator) HEqual(instr, instr);
+ HInstruction* equal = new (GetAllocator()) HEqual(instr, instr);
if_block->AddInstruction(equal);
- instr = new (allocator) HIf(equal);
+ instr = new (GetAllocator()) HIf(equal);
if_block->AddInstruction(instr);
return if_block;
}
-static HBasicBlock* createGotoBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateGotoBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* got = new (allocator) HGoto();
+ HInstruction* got = new (GetAllocator()) HGoto();
block->AddInstruction(got);
return block;
}
-static HBasicBlock* createEntryBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = createGotoBlock(graph, allocator);
+HBasicBlock* GraphTest::CreateEntryBlock(HGraph* graph) {
+ HBasicBlock* block = CreateGotoBlock(graph);
graph->SetEntryBlock(block);
return block;
}
-static HBasicBlock* createReturnBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateReturnBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* return_instr = new (allocator) HReturnVoid();
+ HInstruction* return_instr = new (GetAllocator()) HReturnVoid();
block->AddInstruction(return_instr);
return block;
}
-static HBasicBlock* createExitBlock(HGraph* graph, ArenaAllocator* allocator) {
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+HBasicBlock* GraphTest::CreateExitBlock(HGraph* graph) {
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
- HInstruction* exit_instr = new (allocator) HExit();
+ HInstruction* exit_instr = new (GetAllocator()) HExit();
block->AddInstruction(exit_instr);
return block;
}
@@ -68,16 +77,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_true = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_true = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_true);
@@ -103,16 +109,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the return block.
-TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_false = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorSimpleJoinBlock2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* if_false = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -138,15 +141,12 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(if_block);
@@ -173,15 +173,12 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be the loop header.
-TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
- HBasicBlock* exit_block = createExitBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultipleBackEdges2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
+ HBasicBlock* exit_block = CreateExitBlock(graph);
entry_block->AddSuccessor(if_block);
if_block->AddSuccessor(return_block);
@@ -208,16 +205,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the true block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders1) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -247,16 +241,13 @@
// Test that the successors of an if block stay consistent after a SimplifyCFG.
// This test sets the false block to be a loop header with multiple pre headers.
-TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
- HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
- HBasicBlock* if_block = createIfBlock(graph, &allocator);
- HBasicBlock* loop_block = createGotoBlock(graph, &allocator);
- HBasicBlock* return_block = createReturnBlock(graph, &allocator);
+TEST_F(GraphTest, IfSuccessorMultiplePreHeaders2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry_block = CreateEntryBlock(graph);
+ HBasicBlock* first_if_block = CreateIfBlock(graph);
+ HBasicBlock* if_block = CreateIfBlock(graph);
+ HBasicBlock* loop_block = CreateGotoBlock(graph);
+ HBasicBlock* return_block = CreateReturnBlock(graph);
entry_block->AddSuccessor(first_if_block);
first_if_block->AddSuccessor(if_block);
@@ -283,17 +274,14 @@
loop_block->GetLoopInformation()->GetPreHeader());
}
-TEST(GraphTest, InsertInstructionBefore) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* block = createGotoBlock(graph, &allocator);
+TEST_F(GraphTest, InsertInstructionBefore) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* block = CreateGotoBlock(graph);
HInstruction* got = block->GetLastInstruction();
ASSERT_TRUE(got->IsControlFlow());
// Test at the beginning of the block.
- HInstruction* first_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* first_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(first_instruction, got);
ASSERT_NE(first_instruction->GetId(), -1);
@@ -306,7 +294,7 @@
ASSERT_EQ(got->GetPrevious(), first_instruction);
// Test in the middle of the block.
- HInstruction* second_instruction = new (&allocator) HIntConstant(4);
+ HInstruction* second_instruction = new (GetAllocator()) HIntConstant(4);
block->InsertInstructionBefore(second_instruction, got);
ASSERT_NE(second_instruction->GetId(), -1);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 194f063..eccdccf 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -521,20 +521,28 @@
StartAttributeStream("kind") << deoptimize->GetKind();
}
+ void VisitVecOperation(HVecOperation* vec_operation) OVERRIDE {
+ StartAttributeStream("packed_type") << vec_operation->GetPackedType();
+ }
+
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
+ VisitVecBinaryOperation(hadd);
StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
void VisitVecMin(HVecMin* min) OVERRIDE {
+ VisitVecBinaryOperation(min);
StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
}
void VisitVecMax(HVecMax* max) OVERRIDE {
+ VisitVecBinaryOperation(max);
StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
}
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
+ VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
}
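
In the hunk above each specialized vector visitor now delegates to a shared visitor (VisitVecBinaryOperation, which presumably chains to the new VisitVecOperation) before emitting its own attributes, so every vector node gets a packed_type attribute. A minimal stand-alone sketch of that delegation pattern; the types and names below are illustrative, not the actual ART visualizer classes:

#include <iostream>
#include <string>

// Illustrative stand-ins for the visualizer's vector node hierarchy.
struct VecOp { std::string packed_type = "Int32"; };
struct VecMin : VecOp { bool is_unsigned = true; };

struct AttributePrinter {
  // Base visitor emits the attribute shared by all vector operations.
  void VisitVecOperation(const VecOp& op) {
    std::cout << "packed_type:" << op.packed_type << ' ';
  }
  // A specific visitor delegates first, then appends its own attribute,
  // mirroring the VisitVecMin change above.
  void VisitVecMin(const VecMin& min) {
    VisitVecOperation(min);
    std::cout << "unsigned:" << std::boolalpha << min.is_unsigned << '\n';
  }
};

int main() {
  AttributePrinter printer;
  printer.VisitVecMin(VecMin{});  // prints: packed_type:Int32 unsigned:true
}
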
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 8ea312d..c09e5df 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -566,7 +566,7 @@
}
void GVNOptimization::Run() {
- GlobalValueNumberer gvn(graph_->GetArena(), graph_, side_effects_);
+ GlobalValueNumberer gvn(graph_->GetAllocator(), graph_, side_effects_);
gvn.Run();
}
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index ac0dbee..3bf4cc3 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -24,77 +24,74 @@
namespace art {
-class GVNTest : public CommonCompilerTest {};
+class GVNTest : public OptimizingUnitTest {};
TEST_F(GVNTest, LocalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* to_remove = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(43),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(43),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
- block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* use_after_kill = block->GetLastInstruction();
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(to_remove->GetBlock(), block);
ASSERT_EQ(different_offset->GetBlock(), block);
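
These tests now rely on OptimizingUnitTest for CreateGraph() and GetAllocator() rather than declaring an ArenaPool/ArenaAllocator pair in every body. A rough sketch of what such a fixture centralizes; the member names below are assumptions for illustration, not the actual ART test header:

#include <gtest/gtest.h>

// Hypothetical arena stand-in; in ART this would be the real ArenaAllocator
// backed by an ArenaPool.
class FakeArenaAllocator {};

class OptimizingUnitTestSketch : public ::testing::Test {
 protected:
  // Every TEST_F can allocate IR objects from one arena that lives for the
  // duration of the test, removing the per-test pool/allocator boilerplate.
  FakeArenaAllocator* GetAllocator() { return &allocator_; }

 private:
  FakeArenaAllocator allocator_;
};
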
@@ -111,36 +108,33 @@
}
TEST_F(GVNTest, GlobalFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
- block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
- HBasicBlock* then = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
- HBasicBlock* join = new (&allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -150,36 +144,36 @@
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- then->AddInstruction(new (&allocator) HGoto());
- else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- else_->AddInstruction(new (&allocator) HGoto());
- join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- join->AddInstruction(new (&allocator) HExit());
+ then->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ else_->AddInstruction(new (GetAllocator()) HGoto());
+ join->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ join->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph);
@@ -193,37 +187,34 @@
}
TEST_F(GVNTest, LoopFieldElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
- block->AddInstruction(new (&allocator) HGoto());
+ block->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
+ block->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(loop_header);
graph->AddBlock(loop_body);
@@ -233,54 +224,54 @@
loop_header->AddSuccessor(exit);
loop_body->AddSuccessor(loop_header);
- loop_header->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_header->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
- loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
+ loop_header->AddInstruction(new (GetAllocator()) HIf(block->GetLastInstruction()));
// Kill the value inside the loop body to prevent the field gets inside the
// loop header and the body from being GVN'ed.
- loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_set = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ loop_body->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
- loop_body->AddInstruction(new (&allocator) HGoto());
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
- exit->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ exit->AddInstruction(new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header);
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
@@ -315,22 +306,19 @@
// Test that inner loops affect the side effects of the outer loop.
TEST_F(GVNTest, LoopSideEffects) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HBasicBlock* outer_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* outer_loop_exit = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_header = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_body = new (&allocator) HBasicBlock(graph);
- HBasicBlock* inner_loop_exit = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* outer_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* outer_loop_exit = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_header = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_body = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* inner_loop_exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(outer_loop_header);
graph->AddBlock(outer_loop_body);
@@ -348,20 +336,20 @@
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kBool);
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(graph->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kBool);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
- outer_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- outer_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_header->AddInstruction(new (&allocator) HSuspendCheck());
- inner_loop_header->AddInstruction(new (&allocator) HIf(parameter));
- inner_loop_body->AddInstruction(new (&allocator) HGoto());
- inner_loop_exit->AddInstruction(new (&allocator) HGoto());
- outer_loop_exit->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ outer_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ outer_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HSuspendCheck());
+ inner_loop_header->AddInstruction(new (GetAllocator()) HIf(parameter));
+ inner_loop_body->AddInstruction(new (GetAllocator()) HGoto());
+ inner_loop_exit->AddInstruction(new (GetAllocator()) HGoto());
+ outer_loop_exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
@@ -371,16 +359,16 @@
// Check that the only side effect of loops is to potentially trigger GC.
{
// Make one block with a side effect.
- entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0));
+ entry->AddInstruction(new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0));
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -396,16 +384,16 @@
// Check that the side effects of the outer loop do not affect the inner loop.
{
outer_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
outer_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
@@ -422,16 +410,16 @@
{
outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction());
inner_loop_body->InsertInstructionBefore(
- new (&allocator) HInstanceFieldSet(parameter,
- parameter,
- nullptr,
- DataType::Type::kReference,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0),
+ new (GetAllocator()) HInstanceFieldSet(parameter,
+ parameter,
+ nullptr,
+ DataType::Type::kReference,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0),
inner_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index fe286ab..0987293 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -59,14 +59,19 @@
static bool IsNarrowingIntegralConversion(DataType::Type from, DataType::Type to) {
switch (from) {
case DataType::Type::kInt64:
- return to == DataType::Type::kInt8 || to == DataType::Type::kInt16
- || to == DataType::Type::kUint16 || to == DataType::Type::kInt32;
+ return to == DataType::Type::kUint8 ||
+ to == DataType::Type::kInt8 ||
+ to == DataType::Type::kUint16 ||
+ to == DataType::Type::kInt16 ||
+ to == DataType::Type::kInt32;
case DataType::Type::kInt32:
- return to == DataType::Type::kInt8 || to == DataType::Type::kInt16
- || to == DataType::Type::kUint16;
+ return to == DataType::Type::kUint8 ||
+ to == DataType::Type::kInt8 ||
+ to == DataType::Type::kUint16 ||
+ to == DataType::Type::kInt16;
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- return to == DataType::Type::kInt8;
+ return to == DataType::Type::kUint8 || to == DataType::Type::kInt8;
default:
return false;
}
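
With this hunk the predicate also recognizes the unsigned narrow types (kUint8 next to kInt8, kUint16 next to kInt16). The table reduces to a bit-width comparison among the integral types; a self-contained sketch using a plain enum rather than the ART DataType:

#include <cstdint>

enum class Ty { kUint8, kInt8, kUint16, kInt16, kInt32, kInt64 };

// Matches the widened table above: a conversion is narrowing exactly when
// the destination has strictly fewer bits than the source.
static bool IsNarrowingIntegral(Ty from, Ty to) {
  auto bits = [](Ty t) -> int {
    switch (t) {
      case Ty::kUint8:
      case Ty::kInt8:   return 8;
      case Ty::kUint16:
      case Ty::kInt16:  return 16;
      case Ty::kInt32:  return 32;
      case Ty::kInt64:  return 64;
    }
    return 0;
  };
  return bits(to) < bits(from);
}
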
@@ -77,10 +82,11 @@
*/
static DataType::Type ImplicitConversion(DataType::Type type) {
switch (type) {
- case DataType::Type::kInt16:
- case DataType::Type::kUint16:
- case DataType::Type::kInt8:
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
return DataType::Type::kInt32;
default:
return type;
@@ -94,17 +100,17 @@
HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
: HOptimization(graph, kInductionPassName),
global_depth_(0),
- stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ stack_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
- scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
+ scc_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycle_(std::less<HInstruction*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
type_(DataType::Type::kVoid),
induction_(std::less<HLoopInformation*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
cycles_(std::less<HPhi*>(),
- graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
+ graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)) {
}
void HInductionVarAnalysis::Run() {
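
In the constructor above every container receives graph->GetAllocator()->Adapter(...), so the analysis state is arena-allocated and released wholesale with the graph. A minimal standard-library analogue of that pattern (std::pmr stands in here for the ART arena adapter):

#include <memory_resource>
#include <vector>

int main() {
  // One arena-like resource backing several containers, analogous to the
  // stack_/scc_ vectors drawing from the compiler arena via Adapter().
  std::pmr::monotonic_buffer_resource arena;
  std::pmr::vector<int> stack{&arena};
  std::pmr::vector<int> scc{&arena};
  stack.push_back(1);
  scc.push_back(2);
  return 0;
}  // all allocations are reclaimed together when 'arena' is destroyed
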
@@ -259,7 +265,8 @@
// Rotate proper loop-phi to front.
if (size > 1) {
- ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
+ ArenaVector<HInstruction*> other(
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis));
RotateEntryPhiFirst(loop, &scc_, &other);
}
@@ -985,7 +992,7 @@
it = induction_.Put(loop,
ArenaSafeMap<HInstruction*, InductionInfo*>(
std::less<HInstruction*>(),
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)));
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)));
}
it->second.Put(instruction, info);
}
@@ -1076,7 +1083,7 @@
return CreateSimplifiedInvariant(kSub, b->op_b, b->op_a);
}
}
- return new (graph_->GetArena()) InductionInfo(
+ return new (graph_->GetAllocator()) InductionInfo(
kInvariant, op, a, b, nullptr, ImplicitConversion(b->type));
}
@@ -1113,7 +1120,7 @@
void HInductionVarAnalysis::AssignCycle(HPhi* phi) {
ArenaSet<HInstruction*>* set = &cycles_.Put(phi, ArenaSet<HInstruction*>(
- graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
+ graph_->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)))->second;
for (HInstruction* i : scc_) {
set->insert(i);
}
@@ -1142,9 +1149,10 @@
bool HInductionVarAnalysis::IsNarrowingLinear(InductionInfo* info) {
return info != nullptr &&
info->induction_class == kLinear &&
- (info->type == DataType::Type::kInt8 ||
- info->type == DataType::Type::kInt16 ||
+ (info->type == DataType::Type::kUint8 ||
+ info->type == DataType::Type::kInt8 ||
info->type == DataType::Type::kUint16 ||
+ info->type == DataType::Type::kInt16 ||
(info->type == DataType::Type::kInt32 && (info->op_a->type == DataType::Type::kInt64 ||
info->op_b->type == DataType::Type::kInt64)));
}
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 421b3ab..a2d302a 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -129,7 +129,7 @@
InductionInfo* CreateInvariantFetch(HInstruction* f) {
DCHECK(f != nullptr);
- return new (graph_->GetArena())
+ return new (graph_->GetAllocator())
InductionInfo(kInvariant, kFetch, nullptr, nullptr, f, f->GetType());
}
@@ -138,7 +138,7 @@
InductionInfo* b,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(kInvariant, op, a, b, nullptr, type);
+ return new (graph_->GetAllocator()) InductionInfo(kInvariant, op, a, b, nullptr, type);
}
InductionInfo* CreateInduction(InductionClass ic,
@@ -148,7 +148,7 @@
HInstruction* f,
DataType::Type type) {
DCHECK(a != nullptr && b != nullptr);
- return new (graph_->GetArena()) InductionInfo(ic, op, a, b, f, type);
+ return new (graph_->GetAllocator()) InductionInfo(ic, op, a, b, f, type);
}
// Methods for analysis.
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 53c8044..4c11ad4 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -27,12 +27,10 @@
/**
* Fixture class for the InductionVarAnalysis tests.
*/
-class InductionVarAnalysisTest : public CommonCompilerTest {
+class InductionVarAnalysisTest : public OptimizingUnitTest {
public:
InductionVarAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- iva_(nullptr),
+ : iva_(nullptr),
entry_(nullptr),
return_(nullptr),
exit_(nullptr),
@@ -44,7 +42,7 @@
constant100_(nullptr),
constantm1_(nullptr),
float_constant0_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~InductionVarAnalysisTest() { }
@@ -52,15 +50,15 @@
// Builds single for-loop at depth d.
void BuildForLoop(int d, int n) {
ASSERT_LT(d, n);
- loop_preheader_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_[d]);
- loop_header_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_header_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_[d]);
loop_preheader_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
BuildForLoop(d + 1, n);
}
- loop_body_[d] = new (&allocator_) HBasicBlock(graph_);
+ loop_body_[d] = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_[d]);
loop_body_[d]->AddSuccessor(loop_header_[d]);
if (d < (n - 1)) {
@@ -79,12 +77,12 @@
graph_->SetNumberOfVRegs(n + 3);
// Build basic blocks with entry, nested loop, exit.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
BuildForLoop(0, n);
- return_ = new (&allocator_) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(exit_);
entry_->AddSuccessor(loop_preheader_[0]);
loop_header_[0]->AddSuccessor(return_);
@@ -93,7 +91,7 @@
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(
+ parameter_ = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
@@ -103,20 +101,20 @@
constant100_ = graph_->GetIntConstant(100);
constantm1_ = graph_->GetIntConstant(-1);
float_constant0_ = graph_->GetFloatConstant(0.0f);
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
// Provide loop instructions.
for (int d = 0; d < n; d++) {
- basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, DataType::Type::kInt32);
- loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto());
+ basic_[d] = new (GetAllocator()) HPhi(GetAllocator(), d, 0, DataType::Type::kInt32);
+ loop_preheader_[d]->AddInstruction(new (GetAllocator()) HGoto());
loop_header_[d]->AddPhi(basic_[d]);
- HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_);
+ HInstruction* compare = new (GetAllocator()) HLessThan(basic_[d], constant100_);
loop_header_[d]->AddInstruction(compare);
- loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
- increment_[d] = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
+ loop_header_[d]->AddInstruction(new (GetAllocator()) HIf(compare));
+ increment_[d] = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[d], constant1_);
loop_body_[d]->AddInstruction(increment_[d]);
- loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
+ loop_body_[d]->AddInstruction(new (GetAllocator()) HGoto());
basic_[d]->AddInput(constant0_);
basic_[d]->AddInput(increment_[d]);
@@ -125,9 +123,9 @@
// Builds if-statement at depth d.
HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock** ifF) {
- HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* cond = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifTrue = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* ifFalse = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(cond);
graph_->AddBlock(ifTrue);
graph_->AddBlock(ifFalse);
@@ -137,11 +135,11 @@
cond->AddSuccessor(ifFalse);
ifTrue->AddSuccessor(loop_body_[d]);
ifFalse->AddSuccessor(loop_body_[d]);
- cond->AddInstruction(new (&allocator_) HIf(parameter_));
+ cond->AddInstruction(new (GetAllocator()) HIf(parameter_));
*ifT = ifTrue;
*ifF = ifFalse;
- HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, DataType::Type::kInt32);
+ HPhi* select_phi = new (GetAllocator()) HPhi(GetAllocator(), -1, 0, DataType::Type::kInt32);
loop_body_[d]->AddPhi(select_phi);
return select_phi;
}
@@ -154,7 +152,7 @@
// Inserts a phi into the loop header at depth d and returns it.
HPhi* InsertLoopPhi(int vreg, int d) {
- HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, DataType::Type::kInt32);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), vreg, 0, DataType::Type::kInt32);
loop_header_[d]->AddPhi(phi);
return phi;
}
@@ -164,7 +162,7 @@
HInstruction* InsertArrayStore(HInstruction* subscript, int d) {
// ArraySet is given a float value in order to avoid SsaBuilder typing
// it from the array's non-existent reference type info.
- return InsertInstruction(new (&allocator_) HArraySet(
+ return InsertInstruction(new (GetAllocator()) HArraySet(
parameter_, subscript, float_constant0_, DataType::Type::kFloat32, 0), d);
}
@@ -197,13 +195,11 @@
// Performs InductionVarAnalysis (after proper setup).
void PerformInductionVarAnalysis() {
graph_->BuildDominatorTree();
- iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
+ iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_);
iva_->Run();
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
@@ -286,15 +282,15 @@
// }
BuildLoopNest(1);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, constant100_, basic_[0]), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, basic_[0]), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, basic_[0]), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (100)):Int32", GetInductionInfo(add, 0).c_str());
@@ -318,10 +314,10 @@
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* store1 = InsertArrayStore(add, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, add, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, add, constant1_), 0);
HInstruction* store2 = InsertArrayStore(sub, 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -351,11 +347,11 @@
HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifTrue->AddInstruction(inc1);
k_body->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_);
ifFalse->AddInstruction(inc2);
k_body->AddInput(inc2);
// Merge over a phi.
@@ -384,11 +380,11 @@
HPhi* k = BuildIf(0, &ifTrue, &ifFalse);
// True-branch.
- HInstruction* inc1 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifTrue->AddInstruction(inc1);
k->AddInput(inc1);
// False-branch.
- HInstruction* inc2 = new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
+ HInstruction* inc2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], constant1_);
ifFalse->AddInstruction(inc2);
k->AddInput(inc2);
// Merge over a phi.
@@ -412,11 +408,11 @@
BuildLoopNest(1);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, basic_[0], basic_[0]), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant7_, basic_[0]), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, add2), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, add2), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (0)):Int32", GetInductionInfo(basic_[0], 0).c_str());
@@ -438,11 +434,11 @@
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, basic_[0], constant2_), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant100_, mul), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add, k_header), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -469,17 +465,17 @@
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* pol = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(pol);
PerformInductionVarAnalysis();
@@ -512,11 +508,11 @@
k_header->AddInput(constant7_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, k_header), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, add1, k_header), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, k_header), 0);
HInstruction* add3 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, basic_[0]), 0);
k_header->AddInput(add3);
PerformInductionVarAnalysis();
@@ -542,7 +538,7 @@
k_header->AddInput(constant1_);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(mul);
PerformInductionVarAnalysis();
@@ -567,19 +563,19 @@
k_header->AddInput(constant1_);
HInstruction* add1 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* add2 = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, shl1, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, shl1, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, shl1, constant2_), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, shl1, constant2_), 0);
k_header->AddInput(shl1);
PerformInductionVarAnalysis();
@@ -610,17 +606,17 @@
k_header->AddInput(constant1_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* div = InsertInstruction(
- new (&allocator_) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
+ new (GetAllocator()) HDiv(DataType::Type::kInt32, k_header, constant100_, kNoDexPc), 0);
k_header->AddInput(div);
PerformInductionVarAnalysis();
@@ -645,7 +641,7 @@
k_header->AddInput(constant100_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -665,7 +661,7 @@
k_header->AddInput(constantm1_);
HInstruction* shr = InsertInstruction(
- new (&allocator_) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShr(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(shr);
PerformInductionVarAnalysis();
@@ -689,17 +685,17 @@
k_header->AddInput(constant100_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, sub), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, sub), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant2_), 0);
HInstruction* rem = InsertInstruction(
- new (&allocator_) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
+ new (GetAllocator()) HRem(DataType::Type::kInt32, k_header, constant7_, kNoDexPc), 0);
k_header->AddInput(rem);
PerformInductionVarAnalysis();
@@ -731,7 +727,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0]), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -760,7 +756,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
k_header->AddInput(t);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant100_, basic_[0], 0), 0);
t->AddInput(sub);
PerformInductionVarAnalysis();
@@ -785,19 +781,19 @@
k_header->AddInput(constant0_);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, k_header, constant100_), 0);
HInstruction* shl1 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* shl2 = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, basic_[0], constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, shl2), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, shl2), 0);
k_header->AddInput(shl2);
PerformInductionVarAnalysis();
@@ -856,7 +852,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(sub);
PerformInductionVarAnalysis();
@@ -877,7 +873,7 @@
HInstruction* store = InsertArrayStore(k_header, 0);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -896,7 +892,7 @@
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -915,7 +911,7 @@
k_header->AddInput(constant1_);
HInstruction* x = InsertInstruction(
- new (&allocator_) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
+ new (GetAllocator()) HXor(DataType::Type::kInt32, k_header, constant100_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -933,7 +929,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(k_header, constant0_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(k_header, constant0_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -951,7 +947,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HEqual(constant0_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HEqual(constant0_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -969,7 +965,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(k_header, constant1_), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(k_header, constant1_), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -987,7 +983,7 @@
HPhi* k_header = InsertLoopPhi(0, 0);
k_header->AddInput(constant0_);
- HInstruction* x = InsertInstruction(new (&allocator_) HNotEqual(constant1_, k_header), 0);
+ HInstruction* x = InsertInstruction(new (GetAllocator()) HNotEqual(constant1_, k_header), 0);
k_header->AddInput(x);
PerformInductionVarAnalysis();
@@ -1012,19 +1008,19 @@
k_header->AddInput(constant0_);
HInstruction* neg1 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, k_header), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, k_header), 0);
HInstruction* idiom = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, constant1_, k_header), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* sub = InsertInstruction(
- new (&allocator_) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HSub(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* mul = InsertInstruction(
- new (&allocator_) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
+ new (GetAllocator()) HMul(DataType::Type::kInt32, idiom, constant100_), 0);
HInstruction* shl = InsertInstruction(
- new (&allocator_) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
+ new (GetAllocator()) HShl(DataType::Type::kInt32, idiom, constant1_), 0);
HInstruction* neg2 = InsertInstruction(
- new (&allocator_) HNeg(DataType::Type::kInt32, idiom), 0);
+ new (GetAllocator()) HNeg(DataType::Type::kInt32, idiom), 0);
k_header->AddInput(idiom);
PerformInductionVarAnalysis();
@@ -1057,7 +1053,7 @@
}
HInstruction* inc = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, constant1_, k_header[9]), 9);
HInstruction* store = InsertArrayStore(inc, 9);
for (int d = 0; d < 10; d++) {
@@ -1091,7 +1087,7 @@
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* store2 = InsertArrayStore(basic_[0], 0);
PerformInductionVarAnalysis();
@@ -1122,10 +1118,10 @@
// }
BuildLoopNest(1);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, basic_[0], kNoDexPc), 0);
HInstruction* store1 = InsertArrayStore(conv, 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
HInstruction* store2 = InsertArrayStore(add, 0);
PerformInductionVarAnalysis();
@@ -1152,9 +1148,9 @@
k_header->AddInput(graph_->GetIntConstant(-128));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1180,9 +1176,9 @@
k_header->AddInput(graph_->GetIntConstant(-129));
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, k_header, constant1_), 0);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, add, kNoDexPc), 0);
k_header->AddInput(conv);
PerformInductionVarAnalysis();
@@ -1202,9 +1198,9 @@
k_header->AddInput(constant0_);
HInstruction* conv = InsertInstruction(
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, k_header, kNoDexPc), 0);
HInstruction* add = InsertInstruction(
- new (&allocator_) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, conv, constant1_), 0);
k_header->AddInput(add);
PerformInductionVarAnalysis();
@@ -1221,7 +1217,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(127), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1247,7 +1243,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(128), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt8, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1273,7 +1269,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32767), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1299,7 +1295,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(32768), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kInt16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1324,7 +1320,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65535), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
@@ -1349,7 +1345,7 @@
HInstruction* ifs = loop_header_[0]->GetLastInstruction()->GetPrevious();
ifs->ReplaceInput(graph_->GetIntConstant(65536), 1);
HInstruction* conv =
- new (&allocator_) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
+ new (GetAllocator()) HTypeConversion(DataType::Type::kUint16, increment_[0], kNoDexPc);
loop_body_[0]->InsertInstructionBefore(conv, increment_[0]->GetNext());
basic_[0]->ReplaceInput(conv, 1);
PerformInductionVarAnalysis();
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 92b584c..99dec11 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -159,9 +159,10 @@
/** Corrects a value for type to account for arithmetic wrap-around in lower precision. */
static InductionVarRange::Value CorrectForType(InductionVarRange::Value v, DataType::Type type) {
switch (type) {
- case DataType::Type::kInt16:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
case DataType::Type::kUint16:
- case DataType::Type::kInt8: {
+ case DataType::Type::kInt16: {
// Constants within range only.
// TODO: maybe some room for improvement, like allowing widening conversions
int32_t min = DataType::MinValueOfIntegralType(type);
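
The rule being reordered here is "constants within range only": a constant bound survives the correction only if it fits the narrow type, since arithmetic performed in lower precision may otherwise have wrapped. A self-contained sketch of that check:

#include <cstdint>
#include <limits>

// Illustrative version of the range check: trust a known constant only if
// it is representable in the narrow induction type.
template <typename Narrow>
bool FitsNarrowType(int64_t v) {
  return v >= std::numeric_limits<Narrow>::min() &&
         v <= std::numeric_limits<Narrow>::max();
}

int main() {
  bool fits_int8 = FitsNarrowType<int8_t>(200);    // false: 200 wraps in Int8
  bool fits_uint8 = FitsNarrowType<uint8_t>(200);  // true: 200 fits Uint8
  return (!fits_int8 && fits_uint8) ? 0 : 1;
}
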
@@ -216,10 +217,11 @@
// bounds check elimination, will have truncated higher precision induction
// at their use point already).
switch (info->type) {
- case DataType::Type::kInt32:
- case DataType::Type::kInt16:
- case DataType::Type::kUint16:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
break;
default:
return false;
@@ -416,7 +418,8 @@
if (GenerateCode(trip->op_a, nullptr, graph, block, &trip_expr, false, false)) {
if (taken_test != nullptr) {
HInstruction* zero = graph->GetConstant(trip->type, 0);
- trip_expr = Insert(block, new (graph->GetArena()) HSelect(taken_test, trip_expr, zero, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ trip_expr = Insert(block, new (allocator) HSelect(taken_test, trip_expr, zero, kNoDexPc));
}
return trip_expr;
}
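
Besides hoisting the allocator into a local, the expression built here is select(taken_test, trip, 0): a loop whose taken test fails contributes zero iterations to the trip count. A scalar analogue of that guard:

// Scalar analogue of the HSelect guard above: the trip expression is only
// meaningful when the loop is actually entered.
int GuardedTripCount(bool taken_test, int trip_expr) {
  return taken_test ? trip_expr : 0;
}
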
@@ -1057,7 +1060,7 @@
sum = static_cast<int32_t>(sum); // okay to truncate
}
*result =
- Insert(block, new (graph->GetArena()) HAdd(type, graph->GetConstant(type, sum), c));
+ Insert(block, new (graph->GetAllocator()) HAdd(type, graph->GetConstant(type, sum), c));
}
return true;
}
@@ -1102,12 +1105,13 @@
} else {
// Last value: a * f ^ m + b or a * f ^ -m + b.
HInstruction* e = nullptr;
+ ArenaAllocator* allocator = graph->GetAllocator();
if (info->operation == HInductionVarAnalysis::kMul) {
- e = new (graph->GetArena()) HMul(type, opa, graph->GetConstant(type, fpow));
+ e = new (allocator) HMul(type, opa, graph->GetConstant(type, fpow));
} else {
- e = new (graph->GetArena()) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
+ e = new (allocator) HDiv(type, opa, graph->GetConstant(type, fpow), kNoDexPc);
}
- *result = Insert(block, new (graph->GetArena()) HAdd(type, Insert(block, e), opb));
+ *result = Insert(block, new (allocator) HAdd(type, Insert(block, e), opb));
}
}
return true;
@@ -1188,18 +1192,20 @@
// During actual code generation (graph != nullptr), generate is_even ? x : y.
if (graph != nullptr) {
DataType::Type type = trip->type;
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* msk =
- Insert(block, new (graph->GetArena()) HAnd(type, t, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HAnd(type, t, graph->GetConstant(type, 1)));
HInstruction* is_even =
- Insert(block, new (graph->GetArena()) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
- *result = Insert(block, new (graph->GetArena()) HSelect(is_even, x, y, kNoDexPc));
+ Insert(block, new (allocator) HEqual(msk, graph->GetConstant(type, 0), kNoDexPc));
+ *result = Insert(block, new (allocator) HSelect(is_even, x, y, kNoDexPc));
}
// Guard select with taken test if needed.
if (*needs_taken_test) {
HInstruction* is_taken = nullptr;
if (GenerateCode(trip->op_b, nullptr, graph, block, graph ? &is_taken : nullptr, false, false)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HSelect(is_taken, *result, x, kNoDexPc));
+ ArenaAllocator* allocator = graph->GetAllocator();
+ *result = Insert(block, new (allocator) HSelect(is_taken, *result, x, kNoDexPc));
}
*needs_taken_test = false; // taken care of
} else {
@@ -1248,25 +1254,25 @@
HInstruction* operation = nullptr;
switch (info->operation) {
case HInductionVarAnalysis::kAdd:
- operation = new (graph->GetArena()) HAdd(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HAdd(type, opa, opb); break;
case HInductionVarAnalysis::kSub:
- operation = new (graph->GetArena()) HSub(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HSub(type, opa, opb); break;
case HInductionVarAnalysis::kMul:
- operation = new (graph->GetArena()) HMul(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HMul(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kDiv:
- operation = new (graph->GetArena()) HDiv(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HDiv(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kRem:
- operation = new (graph->GetArena()) HRem(type, opa, opb, kNoDexPc); break;
+ operation = new (graph->GetAllocator()) HRem(type, opa, opb, kNoDexPc); break;
case HInductionVarAnalysis::kXor:
- operation = new (graph->GetArena()) HXor(type, opa, opb); break;
+ operation = new (graph->GetAllocator()) HXor(type, opa, opb); break;
case HInductionVarAnalysis::kLT:
- operation = new (graph->GetArena()) HLessThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThan(opa, opb); break;
case HInductionVarAnalysis::kLE:
- operation = new (graph->GetArena()) HLessThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HLessThanOrEqual(opa, opb); break;
case HInductionVarAnalysis::kGT:
- operation = new (graph->GetArena()) HGreaterThan(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThan(opa, opb); break;
case HInductionVarAnalysis::kGE:
- operation = new (graph->GetArena()) HGreaterThanOrEqual(opa, opb); break;
+ operation = new (graph->GetAllocator()) HGreaterThanOrEqual(opa, opb); break;
default:
LOG(FATAL) << "unknown operation";
}
@@ -1278,7 +1284,7 @@
case HInductionVarAnalysis::kNeg:
if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
if (graph != nullptr) {
- *result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
+ *result = Insert(block, new (graph->GetAllocator()) HNeg(type, opb));
}
return true;
}
@@ -1304,9 +1310,9 @@
} else if (in_body) {
if (GenerateCode(info->op_a, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
*result =
- Insert(block,
- new (graph->GetArena()) HSub(type, opb, graph->GetConstant(type, 1)));
+ Insert(block, new (allocator) HSub(type, opb, graph->GetConstant(type, 1)));
}
return true;
}
@@ -1331,15 +1337,16 @@
if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
if (graph != nullptr) {
+ ArenaAllocator* allocator = graph->GetAllocator();
HInstruction* oper;
if (stride_value == 1) {
- oper = new (graph->GetArena()) HAdd(type, opa, opb);
+ oper = new (allocator) HAdd(type, opa, opb);
} else if (stride_value == -1) {
- oper = new (graph->GetArena()) HSub(type, opb, opa);
+ oper = new (allocator) HSub(type, opb, opa);
} else {
HInstruction* mul =
- new (graph->GetArena()) HMul(type, graph->GetConstant(type, stride_value), opa);
- oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb);
+ new (allocator) HMul(type, graph->GetConstant(type, stride_value), opa);
+ oper = new (allocator) HAdd(type, Insert(block, mul), opb);
}
*result = Insert(block, oper);
}
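
A note on the pattern applied throughout this file: every HIR node is placement-new'd into the graph's arena, so hoisting `graph->GetAllocator()` into a local `allocator` is purely a readability and line-length change; ownership semantics are untouched. A minimal, self-contained sketch of that allocation scheme (the `Arena` class below is a toy stand-in, not ART's `ArenaAllocator`):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy arena: hands out a fresh block per request (a real arena would
    // bump-allocate from large chunks). Objects are never freed one by one;
    // all memory goes away when the arena is destroyed.
    class Arena {
     public:
      ~Arena() {
        for (uint8_t* block : blocks_) delete[] block;
      }
      void* Alloc(size_t bytes) {
        blocks_.push_back(new uint8_t[bytes]);  // char arrays are suitably aligned
        return blocks_.back();
      }
     private:
      std::vector<uint8_t*> blocks_;
    };

    // Placement-new hook mirroring the `new (allocator) HAdd(...)` calls above.
    // A matching placement delete is omitted for brevity.
    void* operator new(size_t bytes, Arena* arena) { return arena->Alloc(bytes); }

    struct Node {
      explicit Node(int v) : value(v) {}
      int value;
    };

    int main() {
      Arena arena;
      Node* n = new (&arena) Node(42);  // arena-owned; no matching delete
      return n->value == 42 ? 0 : 1;
    }

Arena-allocated nodes are deliberately never deleted individually; the compiler frees the whole arena when it finishes the method, which is why none of the `new (allocator)` calls above have a corresponding `delete`.
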
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 1c84269..e5bc6ef 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -29,13 +29,11 @@
/**
* Fixture class for the InductionVarRange tests.
*/
-class InductionVarRangeTest : public CommonCompilerTest {
+class InductionVarRangeTest : public OptimizingUnitTest {
public:
InductionVarRangeTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
range_(iva_) {
BuildGraph();
}
@@ -61,22 +59,22 @@
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
// Two parameters.
- x_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ x_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(x_);
- y_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ y_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(y_);
// Set arbitrary range analysis hint while testing private methods.
SetHint(x_);
@@ -85,13 +83,13 @@
/** Constructs loop with given upper bound. */
void BuildLoop(int32_t lower, HInstruction* upper, int32_t stride) {
// Control flow.
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_header_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(loop_body_);
- HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* return_block = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(return_block);
entry_block_->AddSuccessor(loop_preheader_);
loop_preheader_->AddSuccessor(loop_header_);
@@ -100,24 +98,24 @@
loop_body_->AddSuccessor(loop_header_);
return_block->AddSuccessor(exit_block_);
// Instructions.
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
loop_header_->AddPhi(phi);
phi->AddInput(graph_->GetIntConstant(lower)); // i = l
if (stride > 0) {
- condition_ = new (&allocator_) HLessThan(phi, upper); // i < u
+ condition_ = new (GetAllocator()) HLessThan(phi, upper); // i < u
} else {
- condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u
+ condition_ = new (GetAllocator()) HGreaterThan(phi, upper); // i > u
}
loop_header_->AddInstruction(condition_);
- loop_header_->AddInstruction(new (&allocator_) HIf(condition_));
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(condition_));
increment_ =
- new (&allocator_) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
+ new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, graph_->GetIntConstant(stride));
loop_body_->AddInstruction(increment_); // i += s
phi->AddInput(increment_);
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_block->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_block->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
}
/** Constructs SSA and performs induction variable analysis. */
@@ -304,8 +302,6 @@
Value MaxValue(Value v1, Value v2) { return range_.MergeVal(v1, v2, false); }
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -705,9 +701,9 @@
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (&allocator_) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
entry_block_->AddInstruction(new_array);
- HInstruction* array_length = new (&allocator_) HArrayLength(new_array, 0);
+ HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
// With null hint: yields extreme constants.
const int32_t max_value = std::numeric_limits<int32_t>::max();
@@ -725,13 +721,13 @@
}
TEST_F(InductionVarRangeTest, AddOrSubAndConstant) {
- HInstruction* add = new (&allocator_)
+ HInstruction* add = new (GetAllocator())
HAdd(DataType::Type::kInt32, x_, graph_->GetIntConstant(-1));
- HInstruction* alt = new (&allocator_)
+ HInstruction* alt = new (GetAllocator())
HAdd(DataType::Type::kInt32, graph_->GetIntConstant(-1), x_);
- HInstruction* sub = new (&allocator_)
+ HInstruction* sub = new (GetAllocator())
HSub(DataType::Type::kInt32, x_, graph_->GetIntConstant(1));
- HInstruction* rev = new (&allocator_)
+ HInstruction* rev = new (GetAllocator())
HSub(DataType::Type::kInt32, graph_->GetIntConstant(1), x_);
entry_block_->AddInstruction(add);
entry_block_->AddInstruction(alt);
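
The fixture migration above replaces per-test `pool_`/`allocator_` members with the `GetAllocator()` accessor inherited from `OptimizingUnitTest`. A sketch of the shape such a base class takes, with invented stand-in types (GoogleTest assumed; this is not ART's actual fixture):

    #include <gtest/gtest.h>

    // Stand-ins for ArenaPool/ArenaAllocator; the real types live in ART.
    struct FakePool {};
    struct FakeAllocator {
      explicit FakeAllocator(FakePool* pool) : pool_(pool) {}
      FakePool* pool_;
    };

    // Base fixture owning the pool/allocator pair once, so derived tests no
    // longer declare pool_ and allocator_ themselves.
    class OptimizingUnitTestSketch : public ::testing::Test {
     protected:
      OptimizingUnitTestSketch() : allocator_(&pool_) {}
      FakeAllocator* GetAllocator() { return &allocator_; }

     private:
      FakePool pool_;
      FakeAllocator allocator_;
    };

    TEST_F(OptimizingUnitTestSketch, AllocatorAvailable) {
      EXPECT_NE(GetAllocator(), nullptr);
    }
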
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 90e3d2a..4d846fa 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -705,7 +705,7 @@
uint32_t dex_pc) const {
ArtField* field = class_linker->GetClassRoot(ClassLinker::kJavaLangObject)->GetInstanceField(0);
DCHECK_EQ(std::string(field->GetName()), "shadow$_klass_");
- HInstanceFieldGet* result = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* result = new (graph_->GetAllocator()) HInstanceFieldGet(
receiver,
field,
DataType::Type::kReference,
@@ -812,12 +812,12 @@
uint32_t dex_pc,
HInstruction* cursor,
HBasicBlock* bb_cursor) {
- HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetArena())
- HShouldDeoptimizeFlag(graph_->GetArena(), dex_pc);
- HInstruction* compare = new (graph_->GetArena()) HNotEqual(
+ HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetAllocator())
+ HShouldDeoptimizeFlag(graph_->GetAllocator(), dex_pc);
+ HInstruction* compare = new (graph_->GetAllocator()) HNotEqual(
deopt_flag, graph_->GetIntConstant(0, dex_pc));
- HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(), compare, DeoptimizationKind::kCHA, dex_pc);
+ HInstruction* deopt = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(), compare, DeoptimizationKind::kCHA, dex_pc);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
@@ -865,13 +865,13 @@
// Note that we will just compare the classes, so we don't need Java semantics access checks.
// Note that the type index and the dex file are relative to the method this type guard is
// inlined into.
- HLoadClass* load_class = new (graph_->GetArena()) HLoadClass(graph_->GetCurrentMethod(),
- class_index,
- caller_dex_file,
- klass,
- is_referrer,
- invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ HLoadClass* load_class = new (graph_->GetAllocator()) HLoadClass(graph_->GetCurrentMethod(),
+ class_index,
+ caller_dex_file,
+ klass,
+ is_referrer,
+ invoke_instruction->GetDexPc(),
+ /* needs_access_check */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, compiler_driver_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -887,11 +887,11 @@
load_class->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(load_class, receiver_class);
bb_cursor->InsertInstructionAfter(compare, load_class);
if (with_deoptimization) {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
Runtime::Current()->IsAotCompiler()
@@ -1012,7 +1012,7 @@
uint32_t dex_pc = invoke_instruction->GetDexPc();
HBasicBlock* cursor_block = compare->GetBlock();
HBasicBlock* original_invoke_block = invoke_instruction->GetBlock();
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
// Split the block after the compare: `cursor_block` will now be the start of the diamond,
// and the returned block is the start of the then branch (that could contain multiple blocks).
@@ -1147,7 +1147,7 @@
DataType::Type type = Is64BitInstructionSet(graph_->GetInstructionSet())
? DataType::Type::kInt64
: DataType::Type::kInt32;
- HClassTableGet* class_table_get = new (graph_->GetArena()) HClassTableGet(
+ HClassTableGet* class_table_get = new (graph_->GetAllocator()) HClassTableGet(
receiver_class,
type,
invoke_instruction->IsInvokeVirtual() ? HClassTableGet::TableKind::kVTable
@@ -1164,7 +1164,7 @@
reinterpret_cast<intptr_t>(actual_method), invoke_instruction->GetDexPc());
}
- HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant);
+ HNotEqual* compare = new (graph_->GetAllocator()) HNotEqual(class_table_get, constant);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(receiver_class, cursor);
} else {
@@ -1176,8 +1176,8 @@
if (outermost_graph_->IsCompilingOsr()) {
CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction);
} else {
- HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(),
+ HDeoptimize* deoptimize = new (graph_->GetAllocator()) HDeoptimize(
+ graph_->GetAllocator(),
compare,
receiver,
DeoptimizationKind::kJitSameTarget,
@@ -1240,8 +1240,8 @@
if (dex_method_index == dex::kDexNoIndex) {
return false;
}
- HInvokeVirtual* new_invoke = new (graph_->GetArena()) HInvokeVirtual(
- graph_->GetArena(),
+ HInvokeVirtual* new_invoke = new (graph_->GetAllocator()) HInvokeVirtual(
+ graph_->GetAllocator(),
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
@@ -1517,7 +1517,7 @@
DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
HConstructorFence* constructor_fence =
- new (graph_->GetArena()) HConstructorFence(obj, kNoDexPc, graph_->GetArena());
+ new (graph_->GetAllocator()) HConstructorFence(obj, kNoDexPc, graph_->GetAllocator());
invoke_instruction->GetBlock()->InsertInstructionBefore(constructor_fence,
invoke_instruction);
}
@@ -1539,7 +1539,7 @@
ArtField* resolved_field =
class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
DCHECK(resolved_field != nullptr);
- HInstanceFieldGet* iget = new (graph_->GetArena()) HInstanceFieldGet(
+ HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
resolved_field,
DataType::FromShorty(resolved_field->GetTypeDescriptor()[0]),
@@ -1579,7 +1579,7 @@
DCHECK(referrer->IsConstructor());
*is_final = resolved_field->IsFinal();
}
- HInstanceFieldSet* iput = new (graph_->GetArena()) HInstanceFieldSet(
+ HInstanceFieldSet* iput = new (graph_->GetAllocator()) HInstanceFieldSet(
obj,
value,
resolved_field,
@@ -1641,8 +1641,9 @@
}
const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
- HGraph* callee_graph = new (graph_->GetArena()) HGraph(
- graph_->GetArena(),
+ HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
+ graph_->GetAllocator(),
+ graph_->GetArenaStack(),
callee_dex_file,
method_index,
compiler_driver_->GetInstructionSet(),
@@ -1659,7 +1660,7 @@
if (stats_ != nullptr) {
// Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
if (inline_stats_ == nullptr) {
- void* storage = graph_->GetArena()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+ void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
inline_stats_ = new (storage) OptimizingCompilerStats;
} else {
inline_stats_->Reset();
@@ -1672,7 +1673,6 @@
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
- dex_cache,
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 6ad8036..b06d91c 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -59,8 +59,8 @@
// the first throwing instruction.
HInstruction* current_local_value = (*current_locals_)[i];
if (current_local_value != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
i,
0,
current_local_value->GetType());
@@ -109,8 +109,8 @@
HInstruction* incoming =
ValueOfLocalAt(current_block_->GetLoopInformation()->GetPreHeader(), local);
if (incoming != nullptr) {
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
0,
incoming->GetType());
@@ -148,8 +148,8 @@
if (is_different) {
HInstruction* first_input = ValueOfLocalAt(current_block_->GetPredecessors()[0], local);
- HPhi* phi = new (arena_) HPhi(
- arena_,
+ HPhi* phi = new (allocator_) HPhi(
+ allocator_,
local,
current_block_->GetPredecessors().size(),
first_input->GetType());
@@ -210,8 +210,8 @@
void HInstructionBuilder::InitializeInstruction(HInstruction* instruction) {
if (instruction->NeedsEnvironment()) {
- HEnvironment* environment = new (arena_) HEnvironment(
- arena_,
+ HEnvironment* environment = new (allocator_) HEnvironment(
+ allocator_,
current_locals_->size(),
graph_->GetArtMethod(),
instruction->GetDexPc(),
@@ -227,7 +227,7 @@
return ref;
}
- HNullCheck* null_check = new (arena_) HNullCheck(ref, dex_pc);
+ HNullCheck* null_check = new (allocator_) HNullCheck(ref, dex_pc);
AppendInstruction(null_check);
return null_check;
}
@@ -265,7 +265,7 @@
bool HInstructionBuilder::Build() {
locals_for_.resize(graph_->GetBlocks().size(),
- ArenaVector<HInstruction*>(arena_->Adapter(kArenaAllocGraphBuilder)));
+ ArenaVector<HInstruction*>(allocator_->Adapter(kArenaAllocGraphBuilder)));
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
@@ -275,7 +275,8 @@
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
const uint32_t num_instructions = code_item_.insns_size_in_code_units_;
- native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+ native_debug_info_locations =
+ new (allocator_) ArenaBitVector (allocator_, num_instructions, false);
FindNativeDebugInfoLocations(native_debug_info_locations);
}
@@ -287,14 +288,14 @@
if (current_block_->IsEntryBlock()) {
InitializeParameters();
- AppendInstruction(new (arena_) HSuspendCheck(0u));
- AppendInstruction(new (arena_) HGoto(0u));
+ AppendInstruction(new (allocator_) HSuspendCheck(0u));
+ AppendInstruction(new (allocator_) HGoto(0u));
continue;
} else if (current_block_->IsExitBlock()) {
- AppendInstruction(new (arena_) HExit());
+ AppendInstruction(new (allocator_) HExit());
continue;
} else if (current_block_->IsLoopHeader()) {
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(current_block_->GetDexPc());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(current_block_->GetDexPc());
current_block_->GetLoopInformation()->SetSuspendCheck(suspend_check);
// This is slightly odd because the loop header might not be empty (TryBoundary).
// But we're still creating the environment with locals from the top of the block.
@@ -331,7 +332,7 @@
}
if (native_debuggable && native_debug_info_locations->IsBitSet(dex_pc)) {
- AppendInstruction(new (arena_) HNativeDebugInfo(dex_pc));
+ AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc));
}
if (!ProcessDexInstruction(it.CurrentInstruction(), dex_pc, quicken_index)) {
@@ -348,7 +349,7 @@
// instruction of the current block is not a branching instruction.
// We add an unconditional Goto to the next block.
DCHECK_EQ(current_block_->GetSuccessors().size(), 1u);
- AppendInstruction(new (arena_) HGoto());
+ AppendInstruction(new (allocator_) HGoto());
}
}
@@ -452,7 +453,7 @@
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ HParameterValue* parameter = new (allocator_) HParameterValue(*dex_file_,
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
@@ -468,7 +469,7 @@
const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(
+ HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
@@ -491,18 +492,18 @@
void HInstructionBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(first, second, dex_pc);
+ T* comparison = new (allocator_) T(first, second, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
template<typename T>
void HInstructionBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
HInstruction* value = LoadLocal(instruction.VRegA(), DataType::Type::kInt32);
- T* comparison = new (arena_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
+ T* comparison = new (allocator_) T(value, graph_->GetIntConstant(0, dex_pc), dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
current_block_ = nullptr;
}
@@ -511,7 +512,7 @@
DataType::Type type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -520,7 +521,7 @@
DataType::Type result_type,
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), input_type);
- AppendInstruction(new (arena_) HTypeConversion(result_type, first, dex_pc));
+ AppendInstruction(new (allocator_) HTypeConversion(result_type, first, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -530,7 +531,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -540,7 +541,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -550,7 +551,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegB(), type);
HInstruction* second = LoadLocal(instruction.VRegC(), type);
- AppendInstruction(new (arena_) HCompare(type, first, second, bias, dex_pc));
+ AppendInstruction(new (allocator_) HCompare(type, first, second, bias, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -560,7 +561,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), DataType::Type::kInt32);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -570,7 +571,7 @@
uint32_t dex_pc) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
- AppendInstruction(new (arena_) T(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(type, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -581,7 +582,7 @@
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -592,7 +593,7 @@
if (reverse) {
std::swap(first, second);
}
- AppendInstruction(new (arena_) T(DataType::Type::kInt32, first, second, dex_pc));
+ AppendInstruction(new (allocator_) T(DataType::Type::kInt32, first, second, dex_pc));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
}
@@ -630,13 +631,13 @@
if (table.GetNumEntries() == 0) {
// Empty Switch. Code falls through to the next block.
DCHECK(IsFallthroughInstruction(instruction, dex_pc, current_block_));
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
} else if (table.ShouldBuildDecisionTree()) {
for (DexSwitchTableIterator it(table); !it.Done(); it.Advance()) {
HInstruction* case_value = graph_->GetIntConstant(it.CurrentKey(), dex_pc);
- HEqual* comparison = new (arena_) HEqual(value, case_value, dex_pc);
+ HEqual* comparison = new (allocator_) HEqual(value, case_value, dex_pc);
AppendInstruction(comparison);
- AppendInstruction(new (arena_) HIf(comparison, dex_pc));
+ AppendInstruction(new (allocator_) HIf(comparison, dex_pc));
if (!it.IsLast()) {
current_block_ = FindBlockStartingAt(it.GetDexPcForCurrentIndex());
@@ -644,7 +645,7 @@
}
} else {
AppendInstruction(
- new (arena_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
+ new (allocator_) HPackedSwitch(table.GetEntryAt(0), table.GetNumEntries(), value, dex_pc));
}
current_block_ = nullptr;
@@ -664,16 +665,16 @@
HInstruction* fence_target = current_this_parameter_;
DCHECK(fence_target != nullptr);
- AppendInstruction(new (arena_) HConstructorFence(fence_target, dex_pc, arena_));
+ AppendInstruction(new (allocator_) HConstructorFence(fence_target, dex_pc, allocator_));
MaybeRecordStat(
compilation_stats_,
MethodCompilationStat::kConstructorFenceGeneratedFinal);
}
- AppendInstruction(new (arena_) HReturnVoid(dex_pc));
+ AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
- AppendInstruction(new (arena_) HReturn(value, dex_pc));
+ AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
current_block_ = nullptr;
}
@@ -816,12 +817,12 @@
if (UNLIKELY(resolved_method == nullptr)) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedMethod);
- HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- invoke_type);
+ HInvoke* invoke = new (allocator_) HInvokeUnresolved(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ invoke_type);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -841,8 +842,8 @@
dchecked_integral_cast<uint64_t>(string_init_entry_point)
};
MethodReference target_method(dex_file_, method_idx);
- HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
- arena_,
+ HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
+ allocator_,
number_of_arguments - 1,
DataType::Type::kReference /* return_type */,
dex_pc,
@@ -887,35 +888,35 @@
};
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
- invoke = new (arena_) HInvokeStaticOrDirect(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- dispatch_info,
- invoke_type,
- target_method,
- clinit_check_requirement);
+ invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ dispatch_info,
+ invoke_type,
+ target_method,
+ clinit_check_requirement);
} else if (invoke_type == kVirtual) {
ScopedObjectAccess soa(Thread::Current()); // Needed for the method index
- invoke = new (arena_) HInvokeVirtual(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- resolved_method->GetMethodIndex());
+ invoke = new (allocator_) HInvokeVirtual(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ resolved_method->GetMethodIndex());
} else {
DCHECK_EQ(invoke_type, kInterface);
ScopedObjectAccess soa(Thread::Current()); // Needed for the IMT index.
- invoke = new (arena_) HInvokeInterface(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- resolved_method,
- ImTable::GetImtIndex(resolved_method));
+ invoke = new (allocator_) HInvokeInterface(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ resolved_method,
+ ImTable::GetImtIndex(resolved_method));
}
return HandleInvoke(invoke,
@@ -940,11 +941,11 @@
DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
DataType::Type return_type = DataType::FromShorty(descriptor[0]);
size_t number_of_arguments = strlen(descriptor);
- HInvoke* invoke = new (arena_) HInvokePolymorphic(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx);
+ HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx);
return HandleInvoke(invoke,
number_of_vreg_arguments,
args,
@@ -964,7 +965,7 @@
Handle<mirror::Class> klass = load_class->GetClass();
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(load_class, dex_pc);
+ cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -979,7 +980,7 @@
// Consider classes we haven't resolved as potentially finalizable.
bool finalizable = (klass == nullptr) || klass->IsFinalizable();
- HNewInstance* new_instance = new (arena_) HNewInstance(
+ HNewInstance* new_instance = new (allocator_) HNewInstance(
cls,
dex_pc,
type_index,
@@ -1036,7 +1037,7 @@
// (and in theory the 0-initializing, but that happens automatically
// when new memory pages are mapped in by the OS).
HConstructorFence* ctor_fence =
- new (arena_) HConstructorFence(allocation, allocation->GetDexPc(), arena_);
+ new (allocator_) HConstructorFence(allocation, allocation->GetDexPc(), allocator_);
AppendInstruction(ctor_fence);
MaybeRecordStat(
compilation_stats_,
@@ -1090,7 +1091,7 @@
/* needs_access_check */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
- clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+ clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
AppendInstruction(clinit_check);
}
}
@@ -1290,23 +1291,23 @@
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_set = new (arena_) HUnresolvedInstanceFieldSet(object,
- value,
- field_type,
- field_index,
- dex_pc);
+ field_set = new (allocator_) HUnresolvedInstanceFieldSet(object,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_set = new (arena_) HInstanceFieldSet(object,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_set = new (allocator_) HInstanceFieldSet(object,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_set);
} else {
@@ -1314,21 +1315,21 @@
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kUnresolvedField);
- field_get = new (arena_) HUnresolvedInstanceFieldGet(object,
- field_type,
- field_index,
- dex_pc);
+ field_get = new (allocator_) HUnresolvedInstanceFieldGet(object,
+ field_type,
+ field_index,
+ dex_pc);
} else {
uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
- field_get = new (arena_) HInstanceFieldGet(object,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc);
+ field_get = new (allocator_) HInstanceFieldGet(object,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc);
}
AppendInstruction(field_get);
UpdateLocal(source_or_dest_reg, field_get);
@@ -1382,9 +1383,9 @@
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
AppendInstruction(
- new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ new (allocator_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
} else {
- AppendInstruction(new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ AppendInstruction(new (allocator_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
@@ -1475,7 +1476,7 @@
HInstruction* cls = constant;
if (!IsInitialized(klass)) {
- cls = new (arena_) HClinitCheck(constant, dex_pc);
+ cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1484,38 +1485,38 @@
// We need to keep the class alive before loading the value.
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(HPhi::ToPhiType(value->GetType()), HPhi::ToPhiType(field_type));
- AppendInstruction(new (arena_) HStaticFieldSet(cls,
- value,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldSet(cls,
+ value,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
} else {
- AppendInstruction(new (arena_) HStaticFieldGet(cls,
- resolved_field,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- class_def_index,
- *dex_file_,
- dex_pc));
+ AppendInstruction(new (allocator_) HStaticFieldGet(cls,
+ resolved_field,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ class_def_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
return true;
}
void HInstructionBuilder::BuildCheckedDivRem(uint16_t out_vreg,
- uint16_t first_vreg,
- int64_t second_vreg_or_constant,
- uint32_t dex_pc,
- DataType::Type type,
- bool second_is_constant,
- bool isDiv) {
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
+ DataType::Type type,
+ bool second_is_constant,
+ bool isDiv) {
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
HInstruction* first = LoadLocal(first_vreg, type);
@@ -1533,14 +1534,14 @@
if (!second_is_constant
|| (type == DataType::Type::kInt32 && second->AsIntConstant()->GetValue() == 0)
|| (type == DataType::Type::kInt64 && second->AsLongConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_pc);
+ second = new (allocator_) HDivZeroCheck(second, dex_pc);
AppendInstruction(second);
}
if (isDiv) {
- AppendInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HDiv(type, first, second, dex_pc));
} else {
- AppendInstruction(new (arena_) HRem(type, first, second, dex_pc));
+ AppendInstruction(new (allocator_) HRem(type, first, second, dex_pc));
}
UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
@@ -1554,19 +1555,19 @@
uint8_t index_reg = instruction.VRegC_23x();
HInstruction* object = LoadNullCheckedLocal(array_reg, dex_pc);
- HInstruction* length = new (arena_) HArrayLength(object, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(object, dex_pc);
AppendInstruction(length);
HInstruction* index = LoadLocal(index_reg, DataType::Type::kInt32);
- index = new (arena_) HBoundsCheck(index, length, dex_pc);
+ index = new (allocator_) HBoundsCheck(index, length, dex_pc);
AppendInstruction(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
} else {
- HArrayGet* aget = new (arena_) HArrayGet(object, index, anticipated_type, dex_pc);
+ HArrayGet* aget = new (allocator_) HArrayGet(object, index, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArrayGet(aget);
AppendInstruction(aget);
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
@@ -1582,7 +1583,7 @@
uint32_t register_index) {
HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -1597,7 +1598,7 @@
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1615,7 +1616,7 @@
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetIntConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, anticipated_type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(object, index, value, anticipated_type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1635,13 +1636,13 @@
return;
}
- HInstruction* length = new (arena_) HArrayLength(array, dex_pc);
+ HInstruction* length = new (allocator_) HArrayLength(array, dex_pc);
AppendInstruction(length);
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = graph_->GetIntConstant(payload->element_count - 1, dex_pc);
- AppendInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
+ AppendInstruction(new (allocator_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -1684,7 +1685,8 @@
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
HInstruction* value = graph_->GetLongConstant(data[i], dex_pc);
- HArraySet* aset = new (arena_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
+ HArraySet* aset =
+ new (allocator_) HArraySet(object, index, value, DataType::Type::kInt64, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
@@ -1752,7 +1754,7 @@
}
// Note: `klass` must be from `handles_`.
- HLoadClass* load_class = new (arena_) HLoadClass(
+ HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*actual_dex_file,
@@ -1787,15 +1789,15 @@
ScopedObjectAccess soa(Thread::Current());
TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
- AppendInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HInstanceOf(object, cls, check_kind, dex_pc));
UpdateLocal(destination, current_block_->GetLastInstruction());
} else {
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
// We emit a CheckCast followed by a BoundType. CheckCast is a statement
// which may throw. If it succeeds BoundType sets the new type of `object`
// for all subsequent uses.
- AppendInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
- AppendInstruction(new (arena_) HBoundType(object, dex_pc));
+ AppendInstruction(new (allocator_) HCheckCast(object, cls, check_kind, dex_pc));
+ AppendInstruction(new (allocator_) HBoundType(object, dex_pc));
UpdateLocal(reference, current_block_->GetLastInstruction());
}
}
@@ -1943,7 +1945,7 @@
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
- AppendInstruction(new (arena_) HGoto(dex_pc));
+ AppendInstruction(new (allocator_) HGoto(dex_pc));
current_block_ = nullptr;
break;
}
@@ -2580,7 +2582,7 @@
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* new_array = new (arena_) HNewArray(cls, length, dex_pc);
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
@@ -2744,23 +2746,27 @@
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadNullCheckedLocal(instruction.VRegB_12x(), dex_pc);
- AppendInstruction(new (arena_) HArrayLength(object, dex_pc));
+ AppendInstruction(new (allocator_) HArrayLength(object, dex_pc));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING: {
dex::StringIndex string_index(instruction.VRegB_21c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
dex::StringIndex string_index(instruction.VRegB_31c());
- AppendInstruction(
- new (arena_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc));
+ AppendInstruction(new (allocator_) HLoadString(graph_->GetCurrentMethod(),
+ string_index,
+ *dex_file_,
+ dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -2773,15 +2779,15 @@
}
case Instruction::MOVE_EXCEPTION: {
- AppendInstruction(new (arena_) HLoadException(dex_pc));
+ AppendInstruction(new (allocator_) HLoadException(dex_pc));
UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
- AppendInstruction(new (arena_) HClearException(dex_pc));
+ AppendInstruction(new (allocator_) HClearException(dex_pc));
break;
}
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference);
- AppendInstruction(new (arena_) HThrow(exception, dex_pc));
+ AppendInstruction(new (allocator_) HThrow(exception, dex_pc));
// We finished building this block. Set the current block to null to avoid
// adding dead instructions to it.
current_block_ = nullptr;
@@ -2804,7 +2810,7 @@
}
case Instruction::MONITOR_ENTER: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kEnter,
dex_pc));
@@ -2812,7 +2818,7 @@
}
case Instruction::MONITOR_EXIT: {
- AppendInstruction(new (arena_) HMonitorOperation(
+ AppendInstruction(new (allocator_) HMonitorOperation(
LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
HMonitorOperation::OperationKind::kExit,
dex_pc));
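
One behavioral detail worth highlighting in the builder changes above: `BuildCheckedDivRem` only materializes an `HDivZeroCheck` when the divisor is not a constant, or is the constant zero. The predicate, extracted into a runnable form (names invented for illustration):

    #include <cassert>
    #include <cstdint>

    // Models the guard logic in BuildCheckedDivRem: a zero check is needed
    // unless the divisor is a constant known to be non-zero. Illustrative only.
    bool NeedsDivZeroCheck(bool divisor_is_constant, int64_t divisor_value) {
      return !divisor_is_constant || divisor_value == 0;
    }

    int main() {
      assert(NeedsDivZeroCheck(false, 0));   // unknown divisor: keep the check
      assert(NeedsDivZeroCheck(true, 0));    // constant zero: guaranteed to throw
      assert(!NeedsDivZeroCheck(true, 7));   // non-zero constant: check elided
      return 0;
    }
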
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index a684bf4..79d6ddc 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -43,15 +43,15 @@
const DexFile* dex_file,
const DexFile::CodeItem& code_item,
DataType::Type return_type,
- DexCompilationUnit* dex_compilation_unit,
- const DexCompilationUnit* const outer_compilation_unit,
+ const DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* driver,
CodeGenerator* code_generator,
const uint8_t* interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
Handle<mirror::DexCache> dex_cache,
VariableSizedHandleScope* handles)
- : arena_(graph->GetArena()),
+ : allocator_(graph->GetAllocator()),
graph_(graph),
handles_(handles),
dex_file_(dex_file),
@@ -59,7 +59,7 @@
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- locals_for_(arena_->Adapter(kArenaAllocGraphBuilder)),
+ locals_for_(allocator_->Adapter(kArenaAllocGraphBuilder)),
current_block_(nullptr),
current_locals_(nullptr),
latest_result_(nullptr),
@@ -71,7 +71,7 @@
quicken_info_(interpreter_metadata),
compilation_stats_(compiler_stats),
dex_cache_(dex_cache),
- loop_headers_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(allocator_->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -312,7 +312,7 @@
ObjPtr<mirror::Class> LookupReferrerClass() const REQUIRES_SHARED(Locks::mutator_lock_);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
HGraph* const graph_;
VariableSizedHandleScope* handles_;
@@ -342,7 +342,7 @@
// The compilation unit of the current method being compiled. Note that
// it can be an inlined method.
- DexCompilationUnit* const dex_compilation_unit_;
+ const DexCompilationUnit* const dex_compilation_unit_;
// The compilation unit of the outermost method being compiled. That is the
// method being compiled (and not inlined), and potentially inlining other
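
The header diff above also tightens the compilation-unit members from `DexCompilationUnit* const` (fixed pointer, mutable pointee) to `const DexCompilationUnit* const` (fixed pointer, read-only pointee). The distinction in miniature:

    struct Unit { int x; };

    int main() {
      Unit u{1};
      Unit* const p1 = &u;        // pointer fixed, pointee mutable
      p1->x = 2;                  // ok
      const Unit* const p2 = &u;  // pointer and pointee both const
      // p2->x = 3;               // would not compile
      return p2->x;
    }
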
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 1a2494a..f39acab 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -186,7 +186,7 @@
binop->ReplaceInput(right_neg->GetInput(), 1);
left_neg->GetBlock()->RemoveInstruction(left_neg);
right_neg->GetBlock()->RemoveInstruction(right_neg);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(binop->GetType(), binop);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(binop->GetType(), binop);
binop->GetBlock()->InsertInstructionBefore(neg, binop->GetNext());
binop->ReplaceWithExceptInReplacementAtIndex(neg, 0);
RecordSimplification();
@@ -225,15 +225,15 @@
// Replace the `HAnd` or `HOr`.
HBinaryOperation* hbin;
if (op->IsAnd()) {
- hbin = new (GetGraph()->GetArena()) HOr(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HOr(type, src_left, src_right, dex_pc);
} else {
- hbin = new (GetGraph()->GetArena()) HAnd(type, src_left, src_right, dex_pc);
+ hbin = new (GetGraph()->GetAllocator()) HAnd(type, src_left, src_right, dex_pc);
}
HInstruction* hnot;
if (left->IsBooleanNot()) {
- hnot = new (GetGraph()->GetArena()) HBooleanNot(hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HBooleanNot(hbin, dex_pc);
} else {
- hnot = new (GetGraph()->GetArena()) HNot(type, hbin, dex_pc);
+ hnot = new (GetGraph()->GetAllocator()) HNot(type, hbin, dex_pc);
}
op->GetBlock()->InsertInstructionBefore(hbin, op);
@@ -251,7 +251,8 @@
InstructionSet isa = codegen_->GetInstructionSet();
switch (isa) {
case kArm64:
- if (!(type == DataType::Type::kInt8 ||
+ if (!(type == DataType::Type::kUint8 ||
+ type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
type == DataType::Type::kInt16 ||
type == DataType::Type::kInt32)) {
@@ -260,7 +261,8 @@
break;
case kMips:
case kMips64:
- if (!(type == DataType::Type::kInt8 ||
+ if (!(type == DataType::Type::kUint8 ||
+ type == DataType::Type::kInt8 ||
type == DataType::Type::kUint16 ||
type == DataType::Type::kInt16 ||
type == DataType::Type::kInt32 ||
@@ -272,7 +274,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -305,13 +307,14 @@
use->IsVecAdd() ? HInstruction::kAdd : HInstruction::kSub;
if (accumulator != nullptr) {
HVecMultiplyAccumulate* mulacc =
- new (arena) HVecMultiplyAccumulate(arena,
- kind,
- accumulator,
- mul->GetLeft(),
- mul->GetRight(),
- binop->GetPackedType(),
- binop->GetVectorLength());
+ new (allocator) HVecMultiplyAccumulate(allocator,
+ kind,
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight(),
+ binop->GetPackedType(),
+ binop->GetVectorLength(),
+ binop->GetDexPc());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -360,18 +363,36 @@
}
// Shift operations implicitly mask the shift amount according to the type width. Get rid of
- // unnecessary explicit masking operations on the shift amount.
+ // unnecessary And/Or/Xor/Add/Sub/TypeConversion operations on the shift amount that do not
+ // affect the relevant bits.
// Replace code looking like
- // AND masked_shift, shift, <superset of implicit mask>
- // SHL dst, value, masked_shift
+ // AND adjusted_shift, shift, <superset of implicit mask>
+ // [OR/XOR/ADD/SUB adjusted_shift, shift, <value not overlapping with implicit mask>]
+ // [<conversion-from-integral-non-64-bit-type> adjusted_shift, shift]
+ // SHL dst, value, adjusted_shift
// with
// SHL dst, value, shift
- if (shift_amount->IsAnd()) {
- HAnd* and_insn = shift_amount->AsAnd();
- HConstant* mask = and_insn->GetConstantRight();
- if ((mask != nullptr) && ((Int64FromConstant(mask) & implicit_mask) == implicit_mask)) {
- instruction->ReplaceInput(and_insn->GetLeastConstantLeft(), 1);
+ if (shift_amount->IsAnd() ||
+ shift_amount->IsOr() ||
+ shift_amount->IsXor() ||
+ shift_amount->IsAdd() ||
+ shift_amount->IsSub()) {
+ int64_t required_result = shift_amount->IsAnd() ? implicit_mask : 0;
+ HBinaryOperation* bin_op = shift_amount->AsBinaryOperation();
+ HConstant* mask = bin_op->GetConstantRight();
+ if (mask != nullptr && (Int64FromConstant(mask) & implicit_mask) == required_result) {
+ instruction->ReplaceInput(bin_op->GetLeastConstantLeft(), 1);
RecordSimplification();
+ return;
+ }
+ } else if (shift_amount->IsTypeConversion()) {
+ DCHECK_NE(shift_amount->GetType(), DataType::Type::kBool); // We never convert to bool.
+ DataType::Type source_type = shift_amount->InputAt(0)->GetType();
+ // Non-integral and 64-bit source types require an explicit type conversion.
+ if (DataType::IsIntegralType(source_type) && !DataType::Is64BitType(source_type)) {
+ instruction->ReplaceInput(shift_amount->AsTypeConversion()->GetInput(), 1);
+ RecordSimplification();
+ return;
}
}
}
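
The widened simplification rests on dex shift semantics: a 32-bit shift uses only the low five bits of the shift amount (the low six for 64-bit), so any operation provably leaving those bits unchanged can be stripped. A quick check of the identities involved, with `DexShl` as an invented model of the masked shift (illustrative, not ART code):

    #include <cassert>
    #include <cstdint>

    // Invented model of dex SHL on int: the hardware/interpreter masks the
    // shift amount to its low five bits.
    uint32_t DexShl(uint32_t value, int32_t shift) {
      return value << (shift & 31);
    }

    int main() {
      for (int32_t s = 0; s < 64; ++s) {
        assert(DexShl(1u, s & 31) == DexShl(1u, s));  // AND with mask superset: removable
        assert(DexShl(1u, s | 32) == DexShl(1u, s));  // OR of non-overlapping bits: removable
        assert(DexShl(1u, s + 64) == DexShl(1u, s));  // ADD of a multiple of 32: removable
      }
      return 0;
    }
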
@@ -386,7 +407,8 @@
HUShr* ushr,
HShl* shl) {
DCHECK(op->IsAdd() || op->IsXor() || op->IsOr()) << op->DebugName();
- HRor* ror = new (GetGraph()->GetArena()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
+ HRor* ror =
+ new (GetGraph()->GetAllocator()) HRor(ushr->GetType(), ushr->GetLeft(), ushr->GetRight());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, ror);
if (!ushr->HasUses()) {
ushr->GetBlock()->RemoveInstruction(ushr);
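
For reference, the identity the rotate recognizer matches, checked for `uint32_t` (illustrative):

    #include <cassert>
    #include <cstdint>

    // The (x >>> d) | (x << (32 - d)) idiom collapsed into a single rotate.
    uint32_t Ror(uint32_t x, unsigned d) {
      d &= 31;
      return (x >> d) | (x << ((32 - d) & 31));  // & 31 keeps d == 0 well-defined
    }

    int main() {
      uint32_t x = 0x80000001u;
      assert(Ror(x, 1) == 0xC0000000u);  // low bit wraps to the top
      assert(Ror(x, 0) == x);
      assert(Ror(Ror(x, 7), 25) == x);   // rotations compose mod 32
      return 0;
    }
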
@@ -646,7 +668,7 @@
MaybeRecordStat(stats_, kRemovedInstanceOf);
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
- HNotEqual* test = new (graph->GetArena()) HNotEqual(graph->GetNullConstant(), object);
+ HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object);
instruction->GetBlock()->InsertInstructionBefore(test, instruction);
instruction->ReplaceWith(test);
} else {
@@ -678,30 +700,30 @@
}
}
-static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstruction* cond) {
+static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstruction* cond) {
HInstruction *lhs = cond->InputAt(0);
HInstruction *rhs = cond->InputAt(1);
switch (cond->GetKind()) {
case HInstruction::kEqual:
- return new (arena) HEqual(rhs, lhs);
+ return new (allocator) HEqual(rhs, lhs);
case HInstruction::kNotEqual:
- return new (arena) HNotEqual(rhs, lhs);
+ return new (allocator) HNotEqual(rhs, lhs);
case HInstruction::kLessThan:
- return new (arena) HGreaterThan(rhs, lhs);
+ return new (allocator) HGreaterThan(rhs, lhs);
case HInstruction::kLessThanOrEqual:
- return new (arena) HGreaterThanOrEqual(rhs, lhs);
+ return new (allocator) HGreaterThanOrEqual(rhs, lhs);
case HInstruction::kGreaterThan:
- return new (arena) HLessThan(rhs, lhs);
+ return new (allocator) HLessThan(rhs, lhs);
case HInstruction::kGreaterThanOrEqual:
- return new (arena) HLessThanOrEqual(rhs, lhs);
+ return new (allocator) HLessThanOrEqual(rhs, lhs);
case HInstruction::kBelow:
- return new (arena) HAbove(rhs, lhs);
+ return new (allocator) HAbove(rhs, lhs);
case HInstruction::kBelowOrEqual:
- return new (arena) HAboveOrEqual(rhs, lhs);
+ return new (allocator) HAboveOrEqual(rhs, lhs);
case HInstruction::kAbove:
- return new (arena) HBelow(rhs, lhs);
+ return new (allocator) HBelow(rhs, lhs);
case HInstruction::kAboveOrEqual:
- return new (arena) HBelowOrEqual(rhs, lhs);
+ return new (allocator) HBelowOrEqual(rhs, lhs);
default:
LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
}
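
`GetOppositeConditionSwapOps` relies on every comparison having an operand-swapped dual: `a < b` iff `b > a`, and likewise for the unsigned Below/Above forms. A small exhaustive check (illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t a = -3; a <= 3; ++a) {
        for (int32_t b = -3; b <= 3; ++b) {
          assert((a < b) == (b > a));    // LessThan <-> GreaterThan
          assert((a <= b) == (b >= a));  // LessThanOrEqual <-> GreaterThanOrEqual
          assert((static_cast<uint32_t>(a) < static_cast<uint32_t>(b)) ==
                 (static_cast<uint32_t>(b) > static_cast<uint32_t>(a)));  // Below <-> Above
        }
      }
      return 0;
    }
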
@@ -815,7 +837,9 @@
}
// Constructs a new ABS(x) node in the HIR.
-static HInstruction* NewIntegralAbs(ArenaAllocator* arena, HInstruction* x, HInstruction* cursor) {
+static HInstruction* NewIntegralAbs(ArenaAllocator* allocator,
+ HInstruction* x,
+ HInstruction* cursor) {
DataType::Type type = x->GetType();
DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
// Construct a fake intrinsic with as much context as is needed to allocate one.
@@ -826,8 +850,8 @@
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
0u
};
- HInvokeStaticOrDirect* invoke = new (arena) HInvokeStaticOrDirect(
- arena,
+ HInvokeStaticOrDirect* invoke = new (allocator) HInvokeStaticOrDirect(
+ allocator,
1,
type,
x->GetDexPc(),
@@ -858,10 +882,11 @@
}
DataType::Type type1 = a->GetType();
DataType::Type type2 = b->GetType();
- return (type1 == DataType::Type::kInt8 && type2 == DataType::Type::kInt8) ||
- (type1 == DataType::Type::kInt16 && type2 == DataType::Type::kInt16) ||
- (type1 == DataType::Type::kUint16 && type2 == DataType::Type::kUint16) ||
- (type1 == DataType::Type::kInt32 && type2 == DataType::Type::kInt32 &&
+ return (type1 == DataType::Type::kUint8 && type2 == DataType::Type::kUint8) ||
+ (type1 == DataType::Type::kInt8 && type2 == DataType::Type::kInt8) ||
+ (type1 == DataType::Type::kInt16 && type2 == DataType::Type::kInt16) ||
+ (type1 == DataType::Type::kUint16 && type2 == DataType::Type::kUint16) ||
+ (type1 == DataType::Type::kInt32 && type2 == DataType::Type::kInt32 &&
to_type == DataType::Type::kInt64);
}
@@ -917,14 +942,14 @@
if ((cmp == kCondLT || cmp == kCondLE) &&
(a == negated && a == false_value && IsInt64Value(b, 0))) {
// Found a < 0 ? -a : a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), false_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), false_value, select);
}
} else if (false_value->IsNeg()) {
HInstruction* negated = false_value->InputAt(0);
if ((cmp == kCondGT || cmp == kCondGE) &&
(a == true_value && a == negated && IsInt64Value(b, 0))) {
// Found a > 0 ? a : -a which can be replaced by ABS(a).
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
} else if (true_value->IsSub() && false_value->IsSub()) {
HInstruction* true_sub1 = true_value->InputAt(0);
@@ -939,7 +964,7 @@
// Found a > b ? a - b : b - a or
// a < b ? b - a : a - b
// which can be replaced by ABS(a - b) for lower precision operands a, b.
- replace_with = NewIntegralAbs(GetGraph()->GetArena(), true_value, select);
+ replace_with = NewIntegralAbs(GetGraph()->GetAllocator(), true_value, select);
}
}
}
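
The select rewrites above match the classic ABS idioms. Their scalar shapes in plain C++ (illustrative):

    #include <cassert>

    int main() {
      int a = -5;
      assert((a < 0 ? -a : a) == 5);               // a < 0 ? -a : a  => ABS(a)
      int c = 7;
      assert((c > 0 ? c : -c) == 7);               // a > 0 ? a : -a  => ABS(a)
      int x = 3, y = 9;
      int abs_diff = (x > y) ? (x - y) : (y - x);  // => ABS(x - y)
      assert(abs_diff == 6);
      return 0;
    }
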
@@ -1018,30 +1043,13 @@
}
}
-static bool IsTypeConversionImplicit(DataType::Type input_type, DataType::Type result_type) {
- // Invariant: We should never generate a conversion to a Boolean value.
- DCHECK_NE(DataType::Type::kBool, result_type);
-
- // Besides conversion to the same type, widening integral conversions are implicit,
- // excluding conversions to long and the byte->char conversion where we need to
- // clear the high 16 bits of the 32-bit sign-extended representation of byte.
- return result_type == input_type ||
- (result_type == DataType::Type::kInt32 && (input_type == DataType::Type::kBool ||
- input_type == DataType::Type::kInt8 ||
- input_type == DataType::Type::kInt16 ||
- input_type == DataType::Type::kUint16)) ||
- (result_type == DataType::Type::kUint16 && input_type == DataType::Type::kBool) ||
- (result_type == DataType::Type::kInt16 && (input_type == DataType::Type::kBool ||
- input_type == DataType::Type::kInt8)) ||
- (result_type == DataType::Type::kInt8 && input_type == DataType::Type::kBool);
-}
-
static bool IsTypeConversionLossless(DataType::Type input_type, DataType::Type result_type) {
// The conversion to a larger type is loss-less with the exception of two cases,
- // - conversion to Uint16, the only unsigned type, where we may lose some bits, and
+ // - conversion to the unsigned type Uint16, where we may lose some bits, and
// - conversion from float to long, the only FP to integral conversion with smaller FP type.
// For integral to FP conversions this holds because the FP mantissa is large enough.
- DCHECK_NE(input_type, result_type);
+ // Note: The size check excludes Uint8 as the result type.
+ DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type));
return DataType::Size(result_type) > DataType::Size(input_type) &&
result_type != DataType::Type::kUint16 &&
!(result_type == DataType::Type::kInt64 && input_type == DataType::Type::kFloat32);
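For reference, a standalone sketch of this rule with toy stand-in types and sizes
(illustrative only, not the real DataType helpers):

    #include <cassert>
    #include <cstddef>

    enum class Ty { kInt8, kUint16, kInt32, kInt64, kFloat32 };

    static std::size_t Size(Ty t) {
      switch (t) {
        case Ty::kInt8:    return 1;
        case Ty::kUint16:  return 2;
        case Ty::kInt32:   return 4;
        case Ty::kFloat32: return 4;
        case Ty::kInt64:   return 8;
      }
      return 0;
    }

    static bool Lossless(Ty in, Ty out) {
      return Size(out) > Size(in) &&
             out != Ty::kUint16 &&
             !(out == Ty::kInt64 && in == Ty::kFloat32);
    }

    int main() {
      assert(Lossless(Ty::kInt8, Ty::kInt32));      // ordinary widening.
      assert(!Lossless(Ty::kInt8, Ty::kUint16));    // widening to Uint16 drops the sign.
      assert(!Lossless(Ty::kFloat32, Ty::kInt64));  // float->long truncates fractions.
      return 0;
    }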
@@ -1051,7 +1059,7 @@
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
DataType::Type result_type = instruction->GetResultType();
- if (IsTypeConversionImplicit(input_type, result_type)) {
+ if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
// Remove the implicit conversion; this includes conversion to the same type.
instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
@@ -1080,7 +1088,7 @@
if (is_first_conversion_lossless || integral_conversions_with_non_widening_second) {
// If the merged conversion is implicit, do the simplification unconditionally.
- if (IsTypeConversionImplicit(original_type, result_type)) {
+ if (DataType::IsTypeConversionImplicit(original_type, result_type)) {
instruction->ReplaceWith(original_input);
instruction->GetBlock()->RemoveInstruction(instruction);
if (!input_conversion->HasUses()) {
@@ -1109,7 +1117,7 @@
if (trailing_ones >= kBitsPerByte * DataType::Size(result_type)) {
// The `HAnd` is useless, for example in `(byte) (x & 0xff)`, get rid of it.
HInstruction* original_input = input_and->GetLeastConstantLeft();
- if (IsTypeConversionImplicit(original_input->GetType(), result_type)) {
+ if (DataType::IsTypeConversionImplicit(original_input->GetType(), result_type)) {
instruction->ReplaceWith(original_input);
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -1168,7 +1176,8 @@
// particular, we do not want the live range of `b` to be extended if we are
// not sure the initial 'NEG' instruction can be removed.
HInstruction* other = left_is_neg ? right : left;
- HSub* sub = new(GetGraph()->GetArena()) HSub(instruction->GetType(), other, neg->GetInput());
+ HSub* sub =
+ new(GetGraph()->GetAllocator()) HSub(instruction->GetType(), other, neg->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, sub);
RecordSimplification();
neg->GetBlock()->RemoveInstruction(neg);
@@ -1226,6 +1235,37 @@
RecordSimplification();
return;
}
+ if (input_other->IsTypeConversion() &&
+ input_other->GetType() == DataType::Type::kInt64 &&
+ DataType::IsIntegralType(input_other->InputAt(0)->GetType()) &&
+ IsInt<32>(value) &&
+ input_other->HasOnlyOneNonEnvironmentUse()) {
+ // The AND can be reordered before the TypeConversion. Replace
+ // LongConstant cst, <32-bit-constant-sign-extended-to-64-bits>
+ // TypeConversion<Int64> tmp, src
+ // AND dst, tmp, cst
+ // with
+ // IntConstant cst, <32-bit-constant>
+ // AND tmp, src, cst
+ // TypeConversion<Int64> dst, tmp
+ // This helps 32-bit targets and does not hurt 64-bit targets.
+ // This also simplifies detection of other patterns, such as Uint8 loads.
+ HInstruction* new_and_input = input_other->InputAt(0);
+ // Implicit conversion Int64->Int64 would have been removed previously.
+ DCHECK_NE(new_and_input->GetType(), DataType::Type::kInt64);
+ HConstant* new_const = GetGraph()->GetConstant(DataType::Type::kInt32, value);
+ HAnd* new_and =
+ new (GetGraph()->GetAllocator()) HAnd(DataType::Type::kInt32, new_and_input, new_const);
+ instruction->GetBlock()->InsertInstructionBefore(new_and, instruction);
+ HTypeConversion* new_conversion =
+ new (GetGraph()->GetAllocator()) HTypeConversion(DataType::Type::kInt64, new_and);
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_conversion);
+ input_other->GetBlock()->RemoveInstruction(input_other);
+ RecordSimplification();
+ // Try to process the new And now; do not wait for the next round of simplifications.
+ instruction = new_and;
+ input_other = new_and_input;
+ }
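The reorder is sound because sign extension distributes over AND:
sext(a) & sext(b) == sext(a & b), so masking before or after the widening
conversion gives the same result. A quick host-side sketch (illustrative C++,
not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t cst = static_cast<int32_t>(0xffffff00u);  // any 32-bit constant.
      const int64_t wide_cst = static_cast<int64_t>(cst);     // its sign-extension.
      const int32_t srcs[] = {0, -1, 0x12345678, static_cast<int32_t>(0x87654321u)};
      for (int32_t src : srcs) {
        int64_t conv_then_and = static_cast<int64_t>(src) & wide_cst;  // original order.
        int64_t and_then_conv = static_cast<int64_t>(src & cst);       // reordered.
        assert(conv_then_and == and_then_conv);
      }
      return 0;
    }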
// Eliminate And from UShr+And if the And-mask contains all the bits that
// can be non-zero after UShr. Transform Shr+And to UShr if the And-mask
// precisely clears the shifted-in sign bits.
@@ -1243,7 +1283,7 @@
input_other->HasOnlyOneNonEnvironmentUse()) {
DCHECK(input_other->IsShr()); // For UShr, we would have taken the branch above.
// Replace SHR+AND with USHR, for example "(x >> 24) & 0xff" -> "x >>> 24".
- HUShr* ushr = new (GetGraph()->GetArena()) HUShr(instruction->GetType(),
+ HUShr* ushr = new (GetGraph()->GetAllocator()) HUShr(instruction->GetType(),
input_other->InputAt(0),
input_other->InputAt(1),
input_other->GetDexPc());
@@ -1374,7 +1414,8 @@
// on the right hand side.
if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) {
HBasicBlock* block = condition->GetBlock();
- HCondition* replacement = GetOppositeConditionSwapOps(block->GetGraph()->GetArena(), condition);
+ HCondition* replacement =
+ GetOppositeConditionSwapOps(block->GetGraph()->GetAllocator(), condition);
// If it is a fp we must set the opposite bias.
if (replacement != nullptr) {
if (condition->IsLtBias()) {
@@ -1470,7 +1511,7 @@
// with
// NEG dst, src
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HNeg(type, input_other));
+ instruction, new (GetGraph()->GetAllocator()) HNeg(type, input_other));
RecordSimplification();
return;
}
@@ -1496,7 +1537,7 @@
if (reciprocal != nullptr) {
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, new (GetGraph()->GetArena()) HMul(type, input_other, reciprocal));
+ instruction, new (GetGraph()->GetAllocator()) HMul(type, input_other, reciprocal));
RecordSimplification();
return;
}
@@ -1508,7 +1549,7 @@
HInstruction* input_other = instruction->GetLeastConstantLeft();
DataType::Type type = instruction->GetType();
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
if (input_cst == nullptr) {
return;
@@ -1647,8 +1688,8 @@
// removed.
// We do not perform optimization for fp because we could lose the sign of zero.
HSub* sub = input->AsSub();
- HSub* new_sub =
- new (GetGraph()->GetArena()) HSub(instruction->GetType(), sub->GetRight(), sub->GetLeft());
+ HSub* new_sub = new (GetGraph()->GetAllocator()) HSub(
+ instruction->GetType(), sub->GetRight(), sub->GetLeft());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, new_sub);
if (!sub->HasUses()) {
sub->GetBlock()->RemoveInstruction(sub);
@@ -1750,7 +1791,7 @@
}
HBasicBlock* block = instruction->GetBlock();
- ArenaAllocator* allocator = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
HInstruction* left = instruction->GetLeft();
HInstruction* right = instruction->GetRight();
@@ -1782,7 +1823,7 @@
// SUB dst, a, tmp
// with
// ADD dst, a, b
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left, right->AsNeg()->GetInput());
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left, right->AsNeg()->GetInput());
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
RecordSimplification();
right->GetBlock()->RemoveInstruction(right);
@@ -1798,9 +1839,9 @@
// NEG dst, tmp
// The second version is not intrinsically better, but enables more
// transformations.
- HAdd* add = new(GetGraph()->GetArena()) HAdd(type, left->AsNeg()->GetInput(), right);
+ HAdd* add = new(GetGraph()->GetAllocator()) HAdd(type, left->AsNeg()->GetInput(), right);
instruction->GetBlock()->InsertInstructionBefore(add, instruction);
- HNeg* neg = new (GetGraph()->GetArena()) HNeg(instruction->GetType(), add);
+ HNeg* neg = new (GetGraph()->GetAllocator()) HNeg(instruction->GetType(), add);
instruction->GetBlock()->InsertInstructionBefore(neg, instruction);
instruction->ReplaceWith(neg);
instruction->GetBlock()->RemoveInstruction(instruction);
@@ -1862,7 +1903,7 @@
// XOR dst, src, 1
// with
// BOOLEAN_NOT dst, src
- HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other);
+ HBooleanNot* boolean_not = new (GetGraph()->GetAllocator()) HBooleanNot(input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not);
RecordSimplification();
return;
@@ -1873,7 +1914,7 @@
// XOR dst, src, 0xFFF...FF
// with
// NOT dst, src
- HNot* bitwise_not = new (GetGraph()->GetArena()) HNot(instruction->GetType(), input_other);
+ HNot* bitwise_not = new (GetGraph()->GetAllocator()) HNot(instruction->GetType(), input_other);
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, bitwise_not);
RecordSimplification();
return;
@@ -1944,10 +1985,10 @@
// Unconditionally set the type of the negated distance to `int`,
// as shift and rotate operations expect a 32-bit (or narrower)
// value for their distance input.
- distance = new (GetGraph()->GetArena()) HNeg(DataType::Type::kInt32, distance);
+ distance = new (GetGraph()->GetAllocator()) HNeg(DataType::Type::kInt32, distance);
invoke->GetBlock()->InsertInstructionBefore(distance, invoke);
}
- HRor* ror = new (GetGraph()->GetArena()) HRor(type, value, distance);
+ HRor* ror = new (GetGraph()->GetAllocator()) HRor(type, value, distance);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, ror);
// Remove ClinitCheck and LoadClass, if possible.
HInstruction* clinit = invoke->GetInputs().back();
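Negating the distance is correct because rotate distances wrap modulo the bit
width, so rotateLeft(x, d) == rotateRight(x, -d); a standalone sketch
(illustrative C++, not part of the patch):

    #include <cassert>
    #include <cstdint>

    static uint32_t Ror32(uint32_t x, uint32_t d) {
      d &= 31u;
      return d == 0u ? x : (x >> d) | (x << (32u - d));
    }

    static uint32_t Rol32(uint32_t x, uint32_t d) {
      d &= 31u;
      return d == 0u ? x : (x << d) | (x >> (32u - d));
    }

    int main() {
      const uint32_t x = 0x12345678u;
      for (uint32_t d = 0; d < 64; ++d) {
        // rotate-left by d == rotate-right by the (wrapping) negation of d.
        assert(Rol32(x, d) == Ror32(x, static_cast<uint32_t>(-static_cast<int32_t>(d))));
      }
      return 0;
    }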
@@ -2091,7 +2132,7 @@
} else {
right = GetGraph()->GetIntConstant(0);
}
- HCompare* compare = new (GetGraph()->GetArena())
+ HCompare* compare = new (GetGraph()->GetAllocator())
HCompare(type, left, right, ComparisonBias::kNoBias, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare);
}
@@ -2101,7 +2142,7 @@
uint32_t dex_pc = invoke->GetDexPc();
// IsNaN(x) is the same as x != x.
HInstruction* x = invoke->InputAt(0);
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition);
}
@@ -2128,11 +2169,11 @@
kNoThrow);
}
// Test IsNaN(x), which is the same as x != x.
- HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ HCondition* condition = new (GetGraph()->GetAllocator()) HNotEqual(x, x, dex_pc);
condition->SetBias(ComparisonBias::kLtBias);
invoke->GetBlock()->InsertInstructionBefore(condition, invoke->GetNext());
// Select between the two.
- HInstruction* select = new (GetGraph()->GetArena()) HSelect(condition, nan, invoke, dex_pc);
+ HInstruction* select = new (GetGraph()->GetAllocator()) HSelect(condition, nan, invoke, dex_pc);
invoke->GetBlock()->InsertInstructionBefore(select, condition->GetNext());
invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0
}
@@ -2141,16 +2182,20 @@
HInstruction* str = invoke->InputAt(0);
HInstruction* index = invoke->InputAt(1);
uint32_t dex_pc = invoke->GetDexPc();
- ArenaAllocator* arena = GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (arena) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
- HBoundsCheck* bounds_check = new (arena) HBoundsCheck(
+ HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
index, length, dex_pc, invoke->GetDexMethodIndex());
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
- HArrayGet* array_get = new (arena) HArrayGet(
- str, bounds_check, DataType::Type::kUint16, dex_pc, /* is_string_char_at */ true);
+ HArrayGet* array_get = new (allocator) HArrayGet(str,
+ bounds_check,
+ DataType::Type::kUint16,
+ SideEffects::None(), // Strings are immutable.
+ dex_pc,
+ /* is_string_char_at */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
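In the informal HIR notation the surrounding comments already use, the rewrite
turns the intrinsic call into an explicit array access (sketch; names are
illustrative):

    InvokeVirtual dst, str, index          // String.charAt()
  becomes
    ArrayLength  len, str                  // is_string_length = true
    BoundsCheck  idx, index, len           // inherits the invoke's environment
    ArrayGet     dst, str, idx  (Uint16)   // no side effects: strings are immutable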
@@ -2162,13 +2207,13 @@
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetArena()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HIntConstant* zero = GetGraph()->GetIntConstant(0);
- HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc);
+ HEqual* equal = new (GetGraph()->GetAllocator()) HEqual(length, zero, dex_pc);
replacement = equal;
} else {
DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength);
@@ -2238,9 +2283,11 @@
}
}
-void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) {
+void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke,
+ MemBarrierKind barrier_kind) {
uint32_t dex_pc = invoke->GetDexPc();
- HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc);
+ HMemoryBarrier* mem_barrier =
+ new (GetGraph()->GetAllocator()) HMemoryBarrier(barrier_kind, dex_pc);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier);
}
@@ -2312,6 +2359,21 @@
case Intrinsics::kUnsafeFullFence:
SimplifyMemBarrier(instruction, MemBarrierKind::kAnyAny);
break;
+ case Intrinsics::kVarHandleFullFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kAnyAny);
+ break;
+ case Intrinsics::kVarHandleAcquireFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
+ break;
+ case Intrinsics::kVarHandleReleaseFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kAnyStore);
+ break;
+ case Intrinsics::kVarHandleLoadLoadFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny);
+ break;
+ case Intrinsics::kVarHandleStoreStoreFence:
+ SimplifyMemBarrier(instruction, MemBarrierKind::kStoreStore);
+ break;
default:
break;
}
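Summarizing the new cases, the VarHandle static fences lower to HIR barriers as
follows; mapping loadLoadFence to kLoadAny looks deliberately conservative, as
no load-load-only barrier kind is used here:

    VarHandle.fullFence()        -> HMemoryBarrier(kAnyAny)
    VarHandle.acquireFence()     -> HMemoryBarrier(kLoadAny)
    VarHandle.releaseFence()     -> HMemoryBarrier(kAnyStore)
    VarHandle.loadLoadFence()    -> HMemoryBarrier(kLoadAny)
    VarHandle.storeStoreFence()  -> HMemoryBarrier(kStoreStore)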
@@ -2464,13 +2526,13 @@
int64_t const3_val = ComputeAddition(type, const1_val, const2_val);
HBasicBlock* block = instruction->GetBlock();
HConstant* const3 = block->GetGraph()->GetConstant(type, const3_val);
- ArenaAllocator* arena = instruction->GetArena();
+ ArenaAllocator* allocator = instruction->GetAllocator();
HInstruction* z;
if (is_x_negated) {
- z = new (arena) HSub(type, const3, x, instruction->GetDexPc());
+ z = new (allocator) HSub(type, const3, x, instruction->GetDexPc());
} else {
- z = new (arena) HAdd(type, x, const3, instruction->GetDexPc());
+ z = new (allocator) HAdd(type, x, const3, instruction->GetDexPc());
}
block->ReplaceAndRemoveInstructionWith(instruction, z);
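Java integer arithmetic wraps, so folding the two constants into const3 is exact
even on overflow; a standalone sketch with wrapping helpers standing in for
HAdd/HSub (illustrative C++, not ART code):

    #include <cassert>
    #include <cstdint>

    static int32_t Add32(int32_t a, int32_t b) {  // Java-style wrapping add.
      return static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
    }
    static int32_t Sub32(int32_t a, int32_t b) {  // Java-style wrapping sub.
      return static_cast<int32_t>(static_cast<uint32_t>(a) - static_cast<uint32_t>(b));
    }

    int main() {
      const int32_t x = 123456789, c1 = 0x7ffffff0, c2 = 42;  // c1 + c2 overflows.
      const int32_t c3 = Add32(c1, c2);
      assert(Add32(Add32(x, c1), c2) == Add32(x, c3));  // (x + c1) + c2 -> x + c3.
      assert(Add32(Sub32(c1, x), c2) == Sub32(c3, x));  // (c1 - x) + c2 -> c3 - x.
      return 0;
    }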
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index efd7cb4..9422f9f 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -30,6 +30,57 @@
namespace arm {
+class InstructionSimplifierArmVisitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), stats_(stats) {}
+
+ private:
+ void RecordSimplification() {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(kInstructionSimplificationsArch);
+ }
+ }
+
+ bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
+ bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
+ bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ }
+ bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ }
+
+ /**
+ * This simplifier uses a special-purpose BB visitor.
+ * (1) No need to visit Phi nodes.
+ * (2) Since statements can be removed in a "forward" fashion,
+ * the visitor should test if each statement is still there.
+ */
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ // TODO: fragile iteration, provide more robust iterators?
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+
+ OptimizingCompilerStats* stats_;
+};
+
bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
@@ -86,12 +137,12 @@
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
@@ -234,5 +285,10 @@
}
}
+void InstructionSimplifierArm::Run() {
+ InstructionSimplifierArmVisitor visitor(graph_, stats_);
+ visitor.VisitReversePostOrder();
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index e2ed257..2f65729 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -23,58 +23,6 @@
namespace art {
namespace arm {
-class InstructionSimplifierArmVisitor : public HGraphVisitor {
- public:
- InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
-
- private:
- void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
- }
-
- bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
- bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
- bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
- }
- bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
- }
-
- /**
- * This simplifier uses a special-purpose BB visitor.
- * (1) No need to visit Phi nodes.
- * (2) Since statements can be removed in a "forward" fashion,
- * the visitor should test if each statement is still there.
- */
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
- // TODO: fragile iteration, provide more robust iterators?
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInBlock()) {
- instruction->Accept(this);
- }
- }
- }
-
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
-
- OptimizingCompilerStats* stats_;
-};
-
-
class InstructionSimplifierArm : public HOptimization {
public:
InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
@@ -82,10 +30,7 @@
static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
- void Run() OVERRIDE {
- InstructionSimplifierArmVisitor visitor(graph_, stats_);
- visitor.VisitReversePostOrder();
- }
+ void Run() OVERRIDE;
};
} // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 1c3b79d..c0ab68f 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -30,6 +30,63 @@
using helpers::ShifterOperandSupportsExtension;
+class InstructionSimplifierArm64Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierArm64Visitor(HGraph* graph, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), stats_(stats) {}
+
+ private:
+ void RecordSimplification() {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(kInstructionSimplificationsArch);
+ }
+ }
+
+ bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
+ bool TryMergeIntoShifterOperand(HInstruction* use,
+ HInstruction* bitfield_op,
+ bool do_merge);
+ bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ }
+ bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
+ DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ }
+
+ /**
+ * This simplifier uses a special-purpose BB visitor.
+ * (1) No need to visit Phi nodes.
+ * (2) Since statements can be removed in a "forward" fashion,
+ * the visitor should test if each statement is still there.
+ */
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ // TODO: fragile iteration, provide more robust iterators?
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ // HInstruction visitors, sorted alphabetically.
+ void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+ void VisitMul(HMul* instruction) OVERRIDE;
+ void VisitOr(HOr* instruction) OVERRIDE;
+ void VisitShl(HShl* instruction) OVERRIDE;
+ void VisitShr(HShr* instruction) OVERRIDE;
+ void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
+ void VisitUShr(HUShr* instruction) OVERRIDE;
+ void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
+ void VisitVecStore(HVecStore* instruction) OVERRIDE;
+
+ OptimizingCompilerStats* stats_;
+};
+
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
HInstruction* bitfield_op,
bool do_merge) {
@@ -84,12 +141,12 @@
if (do_merge) {
HDataProcWithShifterOp* alu_with_op =
- new (GetGraph()->GetArena()) HDataProcWithShifterOp(use,
- other_input,
- bitfield_op->InputAt(0),
- op_kind,
- shift_amount,
- use->GetDexPc());
+ new (GetGraph()->GetAllocator()) HDataProcWithShifterOp(use,
+ other_input,
+ bitfield_op->InputAt(0),
+ op_kind,
+ shift_amount,
+ use->GetDexPc());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
if (bitfield_op->GetUses().empty()) {
bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
@@ -223,5 +280,10 @@
}
}
+void InstructionSimplifierArm64::Run() {
+ InstructionSimplifierArm64Visitor visitor(graph_, stats_);
+ visitor.VisitReversePostOrder();
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 4f16fc3..d180a8d 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -23,64 +23,6 @@
namespace art {
namespace arm64 {
-class InstructionSimplifierArm64Visitor : public HGraphVisitor {
- public:
- InstructionSimplifierArm64Visitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
-
- private:
- void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
- }
-
- bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
- bool TryMergeIntoShifterOperand(HInstruction* use,
- HInstruction* bitfield_op,
- bool do_merge);
- bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
- }
- bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
- }
-
- /**
- * This simplifier uses a special-purpose BB visitor.
- * (1) No need to visit Phi nodes.
- * (2) Since statements can be removed in a "forward" fashion,
- * the visitor should test if each statement is still there.
- */
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
- // TODO: fragile iteration, provide more robust iterators?
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInBlock()) {
- instruction->Accept(this);
- }
- }
- }
-
- // HInstruction visitors, sorted alphabetically.
- void VisitAnd(HAnd* instruction) OVERRIDE;
- void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
- void VisitArraySet(HArraySet* instruction) OVERRIDE;
- void VisitMul(HMul* instruction) OVERRIDE;
- void VisitOr(HOr* instruction) OVERRIDE;
- void VisitShl(HShl* instruction) OVERRIDE;
- void VisitShr(HShr* instruction) OVERRIDE;
- void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
- void VisitUShr(HUShr* instruction) OVERRIDE;
- void VisitXor(HXor* instruction) OVERRIDE;
- void VisitVecLoad(HVecLoad* instruction) OVERRIDE;
- void VisitVecStore(HVecStore* instruction) OVERRIDE;
-
- OptimizingCompilerStats* stats_;
-};
-
-
class InstructionSimplifierArm64 : public HOptimization {
public:
InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
@@ -88,10 +30,7 @@
static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
- void Run() OVERRIDE {
- InstructionSimplifierArm64Visitor visitor(graph_, stats_);
- visitor.VisitReversePostOrder();
- }
+ void Run() OVERRIDE;
};
} // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
new file mode 100644
index 0000000..6a0d8a6
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_mips.h"
+
+#include "arch/mips/instruction_set_features_mips.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+namespace mips {
+
+class InstructionSimplifierMipsVisitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierMipsVisitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph),
+ stats_(stats),
+ codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
+
+ private:
+ void RecordSimplification() {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(kInstructionSimplificationsArch);
+ }
+ }
+
+ bool TryExtractArrayAccessIndex(HInstruction* access,
+ HInstruction* index,
+ DataType::Type packed_type);
+ void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+ void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
+ OptimizingCompilerStats* stats_;
+ CodeGeneratorMIPS* codegen_;
+};
+
+bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction* access,
+ HInstruction* index,
+ DataType::Type packed_type) {
+ if (codegen_->GetInstructionSetFeatures().IsR6() ||
+ codegen_->GetInstructionSetFeatures().HasMsa()) {
+ return false;
+ }
+ if (index->IsConstant() ||
+ (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+ // If the index is constant, the whole address calculation can often be done by the
+ // load/store instructions themselves.
+ // TODO: Treat the case with non-embeddable constants.
+ return false;
+ }
+
+ if (packed_type != DataType::Type::kInt16 && packed_type != DataType::Type::kUint16 &&
+ packed_type != DataType::Type::kInt32 && packed_type != DataType::Type::kInt64 &&
+ packed_type != DataType::Type::kFloat32 && packed_type != DataType::Type::kFloat64) {
+ return false;
+ }
+
+ if (access->IsArrayGet() && access->AsArrayGet()->IsStringCharAt()) {
+ return false;
+ }
+
+ HGraph* graph = access->GetBlock()->GetGraph();
+ ArenaAllocator* allocator = graph->GetAllocator();
+ size_t component_shift = DataType::SizeShift(packed_type);
+
+ bool is_extracting_beneficial = false;
+ // Extracting the index into an intermediate address is only beneficial if it has at least 2 users.
+ for (const HUseListNode<HInstruction*>& use : index->GetUses()) {
+ HInstruction* user = use.GetUser();
+ if (user->IsArrayGet() && user != access && !user->AsArrayGet()->IsStringCharAt()) {
+ HArrayGet* another_access = user->AsArrayGet();
+ DataType::Type another_packed_type = another_access->GetType();
+ size_t another_component_shift = DataType::SizeShift(another_packed_type);
+ if (another_component_shift == component_shift) {
+ is_extracting_beneficial = true;
+ break;
+ }
+ } else if (user->IsArraySet() && user != access) {
+ HArraySet* another_access = user->AsArraySet();
+ DataType::Type another_packed_type = another_access->GetType();
+ size_t another_component_shift = DataType::SizeShift(another_packed_type);
+ if (another_component_shift == component_shift) {
+ is_extracting_beneficial = true;
+ break;
+ }
+ } else if (user->IsIntermediateArrayAddressIndex()) {
+ HIntermediateArrayAddressIndex* another_access = user->AsIntermediateArrayAddressIndex();
+ size_t another_component_shift = another_access->GetShift()->AsIntConstant()->GetValue();
+ if (another_component_shift == component_shift) {
+ is_extracting_beneficial = true;
+ break;
+ }
+ }
+ }
+
+ if (!is_extracting_beneficial) {
+ return false;
+ }
+
+ HIntConstant* shift = graph->GetIntConstant(component_shift);
+ HIntermediateArrayAddressIndex* address =
+ new (allocator) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
+ access->GetBlock()->InsertInstructionBefore(address, access);
+ access->ReplaceInput(address, 1);
+ return true;
+}
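In informal HIR notation, the rewrite replaces the raw index (input 1 of the
access) with its scaled form so that accesses sharing an element shift can reuse
it (sketch; names are illustrative):

    ArrayGet dst1, arr1, index             // shift = SizeShift(packed_type)
    ArrayGet dst2, arr2, index
  becomes
    IntermediateArrayAddressIndex t, index, shift    // t ~ index << shift
    ArrayGet dst1, arr1, t
    ArrayGet dst2, arr2, t                 // rewritten when it is visited in turn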
+
+void InstructionSimplifierMipsVisitor::VisitArrayGet(HArrayGet* instruction) {
+ DataType::Type packed_type = instruction->GetType();
+ if (TryExtractArrayAccessIndex(instruction, instruction->GetIndex(), packed_type)) {
+ RecordSimplification();
+ }
+}
+
+void InstructionSimplifierMipsVisitor::VisitArraySet(HArraySet* instruction) {
+ DataType::Type packed_type = instruction->GetComponentType();
+ if (TryExtractArrayAccessIndex(instruction, instruction->GetIndex(), packed_type)) {
+ RecordSimplification();
+ }
+}
+
+void InstructionSimplifierMips::Run() {
+ InstructionSimplifierMipsVisitor visitor(graph_, codegen_, stats_);
+ visitor.VisitReversePostOrder();
+}
+
+} // namespace mips
+} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
new file mode 100644
index 0000000..22cc2ef
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
+
+#include "nodes.h"
+#include "optimization.h"
+#include "code_generator_mips.h"
+
+namespace art {
+
+class CodeGenerator;
+
+namespace mips {
+
+class InstructionSimplifierMips : public HOptimization {
+ public:
+ InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "instruction_simplifier_mips", stats),
+ codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
+
+ static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
+
+ void Run() OVERRIDE;
+
+ private:
+ CodeGeneratorMIPS* codegen_;
+};
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 73d866f..1c13084 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -75,8 +75,8 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
- HMultiplyAccumulate* mulacc = new(arena) HMultiplyAccumulate(
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+ HMultiplyAccumulate* mulacc = new (allocator) HMultiplyAccumulate(
mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
@@ -105,7 +105,7 @@
return false;
}
- ArenaAllocator* arena = mul->GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
if (mul->HasOnlyOneNonEnvironmentUse()) {
HInstruction* use = mul->GetUses().front().GetUser();
@@ -137,11 +137,11 @@
if (accumulator != nullptr) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- binop->GetKind(),
- accumulator,
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ binop->GetKind(),
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight());
binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
DCHECK(!mul->HasUses());
@@ -150,11 +150,11 @@
}
} else if (use->IsNeg() && isa != kArm) {
HMultiplyAccumulate* mulacc =
- new (arena) HMultiplyAccumulate(type,
- HInstruction::kSub,
- mul->GetBlock()->GetGraph()->GetConstant(type, 0),
- mul->GetLeft(),
- mul->GetRight());
+ new (allocator) HMultiplyAccumulate(type,
+ HInstruction::kSub,
+ mul->GetBlock()->GetGraph()->GetConstant(type, 0),
+ mul->GetLeft(),
+ mul->GetRight());
use->GetBlock()->ReplaceAndRemoveInstructionWith(use, mulacc);
DCHECK(!mul->HasUses());
@@ -216,7 +216,7 @@
// BIC dst, src, mask (respectively ORN, EON)
HInstruction* src = hnot->AsNot()->GetInput();
- HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetArena())
+ HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetAllocator())
HBitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc());
op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op);
@@ -255,10 +255,10 @@
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
HIntConstant* offset = graph->GetIntConstant(data_offset);
- HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc);
+ HIntermediateAddress* address = new (allocator) HIntermediateAddress(array, offset, kNoDexPc);
// TODO: Is it ok to not have this on the intermediate address?
// address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
@@ -289,7 +289,7 @@
}
HGraph* graph = access->GetBlock()->GetGraph();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
DataType::Type packed_type = access->GetPackedType();
uint32_t data_offset = mirror::Array::DataOffset(
DataType::Size(packed_type)).Uint32Value();
@@ -328,7 +328,7 @@
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntConstant* shift = graph->GetIntConstant(component_shift);
HIntermediateAddressIndex* address =
- new (arena) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
+ new (allocator) HIntermediateAddressIndex(index, offset, shift, kNoDexPc);
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 1);
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 11725f4..dfae534 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -30,6 +30,16 @@
namespace art {
+// Check that intrinsic enum values fit within the space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
+ static_assert( \
+ static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+ "Instrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
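The bound in the static_assert is the maximum value the bit field can hold:
shifting the mask right by its trailing-zero count leaves an all-ones value of
the field's width. A standalone sketch with a hypothetical mask value (the real
kAccIntrinsicBits lives in the runtime's modifier flags and may have a
different layout):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kAccIntrinsicBits = 0x07f80000u;          // hypothetical 8-bit field.
      const int shift = __builtin_ctz(kAccIntrinsicBits);      // CTZ: index of first set bit.
      const uint32_t max_value = kAccIntrinsicBits >> shift;   // 0xff for an 8-bit field.
      assert(shift == 19);
      assert(max_value == 0xffu);
      // The static_assert above then requires every Intrinsics enumerator <= max_value.
      return 0;
    }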
+
// Function that returns whether an intrinsic is static/direct or virtual.
static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
switch (i) {
@@ -109,6 +119,7 @@
// InvokeStaticOrDirect.
InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
InvokeType invoke_type = invoke->GetInvokeType();
+
switch (intrinsic_type) {
case kStatic:
return (invoke_type == kStatic);
@@ -209,7 +220,7 @@
}
// The intrinsic will call if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetArena()) LocationSummary(
+ LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
if (!invoke->InputAt(0)->IsConstant()) {
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 6411e82..4429e6e 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -100,7 +100,7 @@
// We're moving potentially two or more locations to locations that could overlap, so we need
// a parallel move resolver.
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
@@ -256,25 +256,30 @@
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
<< " should have been converted to HIR"; \
}
-#define UNREACHABLE_INTRINSICS(Arch) \
-UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
-UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
-UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
-UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
-UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
-UNREACHABLE_INTRINSIC(Arch, LongCompare) \
-UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
-UNREACHABLE_INTRINSIC(Arch, LongSignum) \
-UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
-UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
-UNREACHABLE_INTRINSIC(Arch, StringLength) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
-UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence)
+#define UNREACHABLE_INTRINSICS(Arch) \
+UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \
+UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \
+UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \
+UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \
+UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \
+UNREACHABLE_INTRINSIC(Arch, LongCompare) \
+UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \
+UNREACHABLE_INTRINSIC(Arch, LongSignum) \
+UNREACHABLE_INTRINSIC(Arch, StringCharAt) \
+UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \
+UNREACHABLE_INTRINSIC(Arch, StringLength) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \
+UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleFullFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleAcquireFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleReleaseFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleLoadLoadFence) \
+UNREACHABLE_INTRINSIC(Arch, VarHandleStoreStoreFence)
template <typename IntrinsicLocationsBuilder, typename Codegenerator>
bool IsCallFreeIntrinsic(HInvoke* invoke, Codegenerator* codegen) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 75a1ce7..ee07c4f 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -70,7 +70,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetVIXLAssembler()->
@@ -236,18 +236,16 @@
#define __ masm->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -267,10 +265,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -281,10 +279,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -294,10 +292,9 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -324,7 +321,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -332,7 +329,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -340,17 +337,16 @@
}
void IntrinsicLocationsBuilderARM64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) {
GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -368,7 +364,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -376,7 +372,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -396,7 +392,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -404,7 +400,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -423,7 +419,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerReverse(HInvoke* invoke) {
@@ -431,7 +427,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) {
@@ -456,7 +452,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) {
@@ -464,7 +460,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -489,7 +485,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -497,7 +493,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -518,7 +514,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -526,17 +522,16 @@
}
void IntrinsicLocationsBuilderARM64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -552,7 +547,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -560,7 +555,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsFloat(HInvoke* invoke) {
@@ -581,7 +576,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsInt(HInvoke* invoke) {
@@ -589,7 +584,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAbsLong(HInvoke* invoke) {
@@ -614,17 +609,16 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -632,7 +626,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +634,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -648,7 +642,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -673,7 +667,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -681,7 +675,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -689,7 +683,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -697,7 +691,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -705,7 +699,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSqrt(HInvoke* invoke) {
@@ -715,7 +709,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCeil(HInvoke* invoke) {
@@ -725,7 +719,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathFloor(HInvoke* invoke) {
@@ -735,7 +729,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRint(HInvoke* invoke) {
@@ -744,10 +738,9 @@
__ Frintn(DRegisterFrom(locations->Out()), DRegisterFrom(locations->InAt(0)));
}
-static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntPlusFPTempLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -791,7 +784,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -799,7 +792,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateFPToIntPlusFPTempLocations(arena_, invoke);
+ CreateFPToIntPlusFPTempLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -807,7 +800,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -817,7 +810,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -827,7 +820,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -837,7 +830,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -846,16 +839,15 @@
AbsoluteHeapOperandFrom(invoke->GetLocations()->InAt(0), 0));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -865,7 +857,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -875,7 +867,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -885,7 +877,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -895,9 +887,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -949,15 +940,16 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
// We need a temporary register for the read barrier marking slow
@@ -972,22 +964,22 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1009,10 +1001,9 @@
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1020,31 +1011,31 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
static void GenUnsafePut(HInvoke* invoke,
@@ -1151,17 +1142,18 @@
codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1265,10 +1257,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1277,7 +1269,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToInt(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1295,11 +1287,12 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1526,9 +1519,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1754,9 +1746,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1774,9 +1765,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1792,9 +1782,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1819,9 +1808,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -1841,9 +1829,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
@@ -1864,29 +1851,27 @@
__ Bind(slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(invoke->GetType()));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(0)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->InputAt(1)->GetType()));
DCHECK(DataType::IsFloatingPointType(invoke->GetType()));
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
@@ -1901,7 +1886,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCos(HInvoke* invoke) {
@@ -1909,7 +1894,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSin(HInvoke* invoke) {
@@ -1917,7 +1902,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAcos(HInvoke* invoke) {
@@ -1925,7 +1910,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAsin(HInvoke* invoke) {
@@ -1933,7 +1918,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan(HInvoke* invoke) {
@@ -1941,7 +1926,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCbrt(HInvoke* invoke) {
@@ -1949,7 +1934,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathCosh(HInvoke* invoke) {
@@ -1957,7 +1942,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExp(HInvoke* invoke) {
@@ -1965,7 +1950,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathExpm1(HInvoke* invoke) {
@@ -1973,7 +1958,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog(HInvoke* invoke) {
@@ -1981,7 +1966,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathLog10(HInvoke* invoke) {
@@ -1989,7 +1974,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathSinh(HInvoke* invoke) {
@@ -1997,7 +1982,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTan(HInvoke* invoke) {
@@ -2005,7 +1990,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathTanh(HInvoke* invoke) {
@@ -2013,7 +1998,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
@@ -2021,7 +2006,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathHypot(HInvoke* invoke) {
@@ -2029,7 +2014,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2037,9 +2022,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2189,10 +2173,9 @@
}
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2428,10 +2411,9 @@
return;
}
- ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
- LocationSummary* locations = new (allocator) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1));
@@ -2937,7 +2919,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2945,7 +2927,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3026,9 +3008,8 @@
}
void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
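Aside from the mechanical arena_ -> allocator_ rename, every hunk above keeps the same allocation pattern: each LocationSummary is placement-new'ed on the compiler's arena allocator rather than heap-allocated, and the helper now receives that allocator under its new parameter name. Below is a minimal, self-contained sketch of that pattern; Arena, Summary, and CreateLocations are illustrative stand-ins, not ART's real declarations.

// Illustrative sketch only; names are stand-ins, not ART's real API.
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy owning arena standing in for art::ArenaAllocator.
class Arena {
 public:
  void* Alloc(size_t bytes) {
    blocks_.push_back(new uint8_t[bytes]);
    return blocks_.back();
  }
  ~Arena() {
    for (uint8_t* block : blocks_) delete[] block;
  }

 private:
  std::vector<uint8_t*> blocks_;
};

// Placement-new hook so `new (allocator) Summary(...)` compiles, mirroring
// how LocationSummary is allocated on the graph's allocator.
struct Summary {
  explicit Summary(int call_kind) : call_kind_(call_kind) {}
  static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
  static void operator delete(void*, Arena*) {}  // matching placement delete
  int call_kind_;
};

// The helper takes the allocator explicitly, under its post-rename name.
static Summary* CreateLocations(Arena* allocator, int call_kind) {
  return new (allocator) Summary(call_kind);
}

int main() {
  Arena allocator;
  Summary* locations = CreateLocations(&allocator, /* call_kind= */ 0);
  return locations->call_kind_;
}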
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 5a6d180..3533c88 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -39,8 +39,8 @@
class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor {
public:
- explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* arena, CodeGeneratorARM64* codegen)
- : arena_(arena), codegen_(codegen) {}
+ explicit IntrinsicLocationsBuilderARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
+ : allocator_(allocator), codegen_(codegen) {}
// Define visitor methods.
@@ -57,7 +57,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorARM64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderARM64);
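The header-side half of the change is the matching constructor-parameter and field rename. A hedged sketch of the resulting shape (Arena and Codegen are stand-ins for the real ArenaAllocator and CodeGeneratorARM64 types, which are not reproduced here):

// Sketch of the builder after the rename; names are illustrative only.
class Arena;    // stand-in for art::ArenaAllocator
class Codegen;  // stand-in for art::arm64::CodeGeneratorARM64

class LocationsBuilder {
 public:
  LocationsBuilder(Arena* allocator, Codegen* codegen)
      : allocator_(allocator), codegen_(codegen) {}

 private:
  Arena* allocator_;  // previously named arena_
  Codegen* codegen_;
};

int main() {
  LocationsBuilder builder(nullptr, nullptr);
  (void)builder;
  return 0;
}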
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 7ce576c..332306b 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -65,7 +65,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
// Default slow-path for fallback (calling the managed code to handle the intrinsic) in an
@@ -246,7 +246,7 @@
};
IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
assembler_(codegen->GetAssembler()),
features_(codegen->GetInstructionSetFeatures()) {}
@@ -260,18 +260,16 @@
return res->Intrinsified();
}
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -297,10 +295,10 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -311,10 +309,10 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -324,26 +322,23 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -376,7 +371,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -384,7 +379,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -422,7 +417,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -430,7 +425,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -442,7 +437,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
@@ -450,17 +445,16 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, GetAssembler());
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -499,7 +493,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
@@ -508,7 +502,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
@@ -575,17 +569,16 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -594,7 +587,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
@@ -654,7 +647,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -662,7 +655,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -708,17 +701,16 @@
}
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
@@ -726,7 +718,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -751,17 +743,16 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
@@ -769,7 +760,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -777,7 +768,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
@@ -787,7 +778,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -799,9 +790,8 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -850,7 +840,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -860,7 +850,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -870,7 +860,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -891,7 +881,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -900,16 +890,15 @@
__ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -918,7 +907,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -927,7 +916,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -941,7 +930,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -950,9 +939,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1034,17 +1022,18 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1061,22 +1050,22 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
@@ -1098,14 +1087,13 @@
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
const ArmInstructionSetFeatures& features,
DataType::Type type,
bool is_volatile,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1126,39 +1114,39 @@
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- arena_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -1284,17 +1272,18 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1427,7 +1416,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
@@ -1436,7 +1425,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
GenCas(invoke, DataType::Type::kInt32, codegen_);
@@ -1451,11 +1440,12 @@
void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- invoke->InputAt(1)->CanBeNull()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ invoke->InputAt(1)->CanBeNull()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -1733,9 +1723,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1974,9 +1963,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -1994,9 +1982,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
// best to align the inputs accordingly.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
@@ -2012,9 +1999,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2037,9 +2023,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
@@ -2059,9 +2044,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
locations->SetOut(LocationFrom(r0));
@@ -2571,7 +2555,7 @@
__ Bind(intrinsic_slow_path->GetExitLabel());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2585,9 +2569,8 @@
DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2597,7 +2580,7 @@
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
// If the graph is debuggable, all callee-saved floating-point registers are blocked by
// the code generator. Furthermore, the register allocator creates fixed live intervals
// for all caller-saved registers because we are doing a function call. As a result, if
@@ -2612,9 +2595,8 @@
DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64);
DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);
- LocationSummary* const locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* const locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
const InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -2669,7 +2651,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
@@ -2677,7 +2659,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
@@ -2685,7 +2667,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
@@ -2693,7 +2675,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
@@ -2701,7 +2683,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
@@ -2709,7 +2691,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
@@ -2717,7 +2699,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
@@ -2725,7 +2707,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
@@ -2733,7 +2715,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
@@ -2741,7 +2723,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
@@ -2749,7 +2731,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
@@ -2757,7 +2739,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
@@ -2765,7 +2747,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
@@ -2773,7 +2755,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
@@ -2781,7 +2763,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
@@ -2789,7 +2771,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
@@ -2797,7 +2779,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
@@ -2805,7 +2787,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
@@ -2814,7 +2796,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
@@ -2831,7 +2813,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -2840,7 +2822,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
@@ -2857,7 +2839,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
@@ -2894,7 +2876,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
@@ -2961,7 +2943,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2969,7 +2951,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -3026,7 +3008,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -3034,7 +3016,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateLongToLongLocationsWithOverlap(arena_, invoke);
+ CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -3042,9 +3024,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -3170,7 +3151,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -3188,7 +3169,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3215,7 +3196,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3227,7 +3208,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
if (features_.HasARMv8AInstructions()) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
}
@@ -3309,9 +3290,8 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
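
The hunks above reformat each placement-new of a LocationSummary while switching the parameter/member name from arena to allocator. The `new (allocator) LocationSummary(...)` expressions rely on arena (bump-pointer) allocation: the object is placed into memory owned by the compilation's arena and is never individually deleted. A minimal, self-contained C++ sketch of that idiom follows; the Arena class is a simplified stand-in, not ART's ArenaAllocator.

#include <cstddef>
#include <cstdio>
#include <new>

// Simplified stand-in for ART's ArenaAllocator: a fixed-size bump allocator.
// Bounds checks are omitted for brevity; alignment is a flat 8 bytes.
class Arena {
 public:
  void* Alloc(std::size_t bytes) {
    void* p = buffer_ + used_;
    used_ += (bytes + 7u) & ~std::size_t{7};  // round up to 8-byte alignment
    return p;
  }
 private:
  alignas(8) unsigned char buffer_[1024];
  std::size_t used_ = 0;
};

// Placement operator new, so `new (arena) T(...)` bump-allocates from the
// arena, mirroring the `new (allocator_) LocationSummary(...)` calls above.
void* operator new(std::size_t bytes, Arena* arena) { return arena->Alloc(bytes); }

struct LocationSummary {
  explicit LocationSummary(int kind) : kind(kind) {}
  int kind;
};

int main() {
  Arena arena;
  LocationSummary* locations = new (&arena) LocationSummary(0);
  std::printf("kind=%d\n", locations->kind);
  // No delete: the arena owns the storage and releases it wholesale.
}

Because the arena owns the storage, nothing ever frees a LocationSummary individually; the whole buffer is discarded when compilation finishes, which is why the rename touches only how the allocator is obtained, not any deallocation code.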
diff --git a/compiler/optimizing/intrinsics_arm_vixl.h b/compiler/optimizing/intrinsics_arm_vixl.h
index a4a2830..4f18ca3 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.h
+++ b/compiler/optimizing/intrinsics_arm_vixl.h
@@ -46,7 +46,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGenerator* codegen_;
ArmVIXLAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 8847256..5f2f71b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -35,7 +35,7 @@
namespace mips {
IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
@@ -43,7 +43,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
@@ -152,10 +152,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -178,7 +177,7 @@
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -187,17 +186,16 @@
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -220,7 +218,7 @@
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -229,19 +227,18 @@
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kNoOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), overlaps);
}
@@ -402,7 +399,7 @@
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -416,7 +413,7 @@
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
@@ -430,7 +427,7 @@
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
@@ -474,7 +471,7 @@
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -483,7 +480,7 @@
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -561,7 +558,7 @@
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -570,7 +567,7 @@
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -579,7 +576,7 @@
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
@@ -593,7 +590,7 @@
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
@@ -605,10 +602,9 @@
GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -725,7 +721,7 @@
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
@@ -734,9 +730,8 @@
// int java.lang.Long.bitCount(int)
void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
@@ -801,7 +796,7 @@
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsDouble(HInvoke* invoke) {
@@ -810,7 +805,7 @@
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsFloat(HInvoke* invoke) {
@@ -847,7 +842,7 @@
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsInt(HInvoke* invoke) {
@@ -856,7 +851,7 @@
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAbsLong(HInvoke* invoke) {
@@ -1026,10 +1021,9 @@
}
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
@@ -1037,7 +1031,7 @@
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -1050,7 +1044,7 @@
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -1063,7 +1057,7 @@
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -1076,7 +1070,7 @@
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -1087,10 +1081,9 @@
GetAssembler());
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -1267,7 +1260,7 @@
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
@@ -1280,7 +1273,7 @@
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
@@ -1293,7 +1286,7 @@
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -1306,7 +1299,7 @@
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -1319,7 +1312,7 @@
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
@@ -1333,7 +1326,7 @@
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1346,7 +1339,7 @@
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1378,7 +1371,7 @@
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1396,7 +1389,7 @@
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1416,17 +1409,16 @@
}
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1439,7 +1431,7 @@
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1461,7 +1453,7 @@
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1479,7 +1471,7 @@
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1501,9 +1493,8 @@
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1517,17 +1508,18 @@
Thread::PeerOffset<kMipsPointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1657,7 +1649,7 @@
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
@@ -1666,7 +1658,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1675,7 +1667,7 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1684,7 +1676,7 @@
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1693,17 +1685,16 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
}
-static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1774,7 +1765,7 @@
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
@@ -1788,7 +1779,7 @@
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1802,7 +1793,7 @@
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1816,7 +1807,7 @@
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1830,7 +1821,7 @@
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1844,7 +1835,7 @@
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1858,7 +1849,7 @@
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1872,7 +1863,7 @@
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1884,15 +1875,16 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2016,7 +2008,7 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -2031,7 +2023,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2044,9 +2036,8 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2071,9 +2062,8 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2248,9 +2238,8 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -2273,9 +2262,8 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -2299,9 +2287,8 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2325,9 +2312,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -2348,9 +2334,8 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -2411,7 +2396,7 @@
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -2420,7 +2405,7 @@
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2476,7 +2461,7 @@
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2485,7 +2470,7 @@
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+ CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2524,7 +2509,7 @@
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2533,7 +2518,7 @@
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
@@ -2542,9 +2527,8 @@
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -2667,9 +2651,8 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2757,20 +2740,18 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2804,7 +2785,7 @@
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) {
@@ -2813,7 +2794,7 @@
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) {
@@ -2822,7 +2803,7 @@
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) {
@@ -2831,7 +2812,7 @@
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
@@ -2840,7 +2821,7 @@
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
@@ -2849,7 +2830,7 @@
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
@@ -2858,7 +2839,7 @@
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
@@ -2867,7 +2848,7 @@
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
@@ -2876,7 +2857,7 @@
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
@@ -2885,7 +2866,7 @@
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
@@ -2894,7 +2875,7 @@
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
@@ -2903,7 +2884,7 @@
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
@@ -2912,7 +2893,7 @@
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
@@ -2921,7 +2902,7 @@
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
@@ -2930,7 +2911,7 @@
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
@@ -2939,7 +2920,7 @@
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
@@ -2948,7 +2929,7 @@
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) {
@@ -2982,7 +2963,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
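
Several of the Unsafe.get/compareAndSwap hunks above choose the LocationSummary call kind conditionally: when compiler read barriers are enabled and the intrinsic touches an object reference, the summary must be kCallOnSlowPath rather than kNoCall. A simplified, self-contained sketch of that selection follows; the enum and flag are stand-ins for ART's real definitions.

#include <cstdio>

// Stand-ins only; ART's real flags and LocationSummary kinds are richer.
enum class CallKind { kNoCall, kCallOnSlowPath };

constexpr bool kEmitCompilerReadBarrier = true;

// Mirrors the shape of CreateIntIntIntToIntLocations above: an object-typed
// Unsafe intrinsic may hit the read-barrier slow path, so its summary must
// say so; primitive variants stay kNoCall.
CallKind SelectCallKind(bool is_object_intrinsic) {
  const bool can_call = kEmitCompilerReadBarrier && is_object_intrinsic;
  return can_call ? CallKind::kCallOnSlowPath : CallKind::kNoCall;
}

int main() {
  std::printf("object: %s\n",
              SelectCallKind(true) == CallKind::kCallOnSlowPath ? "slow path" : "no call");
  std::printf("int:    %s\n",
              SelectCallKind(false) == CallKind::kCallOnSlowPath ? "slow path" : "no call");
}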
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 05d1aa2..afd9548 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -50,7 +50,7 @@
private:
CodeGeneratorMIPS* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
};
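
The header hunk above renames the cached member itself; the matching constructor change appears at the top of each .cc diff, where the builder now pulls the allocator from the graph via GetAllocator() instead of GetArena(). A condensed, hypothetical mirror of that wiring follows; HGraph and CodeGenerator are stubbed, not ART's classes.

#include <cstdio>

struct ArenaAllocator {};

struct HGraph {
  ArenaAllocator* GetAllocator() { return &allocator; }
  ArenaAllocator allocator;
};

struct CodeGenerator {
  HGraph* GetGraph() { return &graph; }
  HGraph graph;
};

class IntrinsicLocationsBuilder {
 public:
  explicit IntrinsicLocationsBuilder(CodeGenerator* codegen)
      : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {}
  ArenaAllocator* allocator() const { return allocator_; }
 private:
  CodeGenerator* codegen_;
  ArenaAllocator* allocator_;  // was: ArenaAllocator* arena_;
};

int main() {
  CodeGenerator codegen;
  IntrinsicLocationsBuilder builder(&codegen);
  std::printf("%p\n", static_cast<void*>(builder.allocator()));
}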
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index d0234d8..8d5be80 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -35,7 +35,7 @@
namespace mips64 {
IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
- : codegen_(codegen), arena_(codegen->GetGraph()->GetArena()) {
+ : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
}
Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
@@ -43,7 +43,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
#define __ codegen->GetAssembler()->
@@ -141,10 +141,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -162,7 +161,7 @@
// long java.lang.Double.doubleToRawLongBits(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,17 +170,16 @@
// int java.lang.Float.floatToRawIntBits(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -199,7 +197,7 @@
// double java.lang.Double.longBitsToDouble(long)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
@@ -208,17 +206,16 @@
// float java.lang.Float.intBitsToFloat(int)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -250,7 +247,7 @@
// int java.lang.Integer.reverseBytes(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -259,7 +256,7 @@
// long java.lang.Long.reverseBytes(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -268,7 +265,7 @@
// short java.lang.Short.reverseBytes(short)
void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -290,7 +287,7 @@
// int java.lang.Integer.numberOfLeadingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -299,7 +296,7 @@
// int java.lang.Long.numberOfLeadingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -327,7 +324,7 @@
// int java.lang.Integer.numberOfTrailingZeros(int i)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -336,7 +333,7 @@
// int java.lang.Long.numberOfTrailingZeros(long i)
void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -364,7 +361,7 @@
// int java.lang.Integer.reverse(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
@@ -373,17 +370,16 @@
// long java.lang.Long.reverse(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) {
GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}
@@ -458,7 +454,7 @@
// int java.lang.Integer.bitCount(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -467,7 +463,7 @@
// int java.lang.Long.bitCount(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
@@ -487,7 +483,7 @@
// double java.lang.Math.abs(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -496,17 +492,16 @@
// float java.lang.Math.abs(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToInt(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToInt(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -528,7 +523,7 @@
// int java.lang.Math.abs(int)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsInt(HInvoke* invoke) {
@@ -537,7 +532,7 @@
// long java.lang.Math.abs(long)
void IntrinsicLocationsBuilderMIPS64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToInt(arena_, invoke);
+ CreateIntToInt(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAbsLong(HInvoke* invoke) {
@@ -613,10 +608,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
@@ -624,7 +618,7 @@
// double java.lang.Math.min(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -633,7 +627,7 @@
// float java.lang.Math.min(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -642,7 +636,7 @@
// double java.lang.Math.max(double, double)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -651,7 +645,7 @@
// float java.lang.Math.max(float, float)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -716,10 +710,9 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -727,7 +720,7 @@
// int java.lang.Math.min(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -736,7 +729,7 @@
// long java.lang.Math.min(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -745,7 +738,7 @@
// int java.lang.Math.max(int, int)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -754,7 +747,7 @@
// long java.lang.Math.max(long, long)
void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
@@ -763,7 +756,7 @@
// double java.lang.Math.sqrt(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
@@ -775,19 +768,18 @@
__ SqrtD(out, in);
}
-static void CreateFPToFP(ArenaAllocator* arena,
+static void CreateFPToFP(ArenaAllocator* allocator,
HInvoke* invoke,
Location::OutputOverlap overlaps = Location::kOutputOverlap) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister(), overlaps);
}
// double java.lang.Math.rint(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
+ CreateFPToFP(allocator_, invoke, Location::kNoOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
@@ -801,7 +793,7 @@
// double java.lang.Math.floor(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
@@ -878,7 +870,7 @@
// double java.lang.Math.ceil(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
@@ -961,9 +953,8 @@
// int java.lang.Math.round(float)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -975,9 +966,8 @@
// long java.lang.Math.round(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
@@ -989,7 +979,7 @@
// byte libcore.io.Memory.peekByte(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1002,7 +992,7 @@
// short libcore.io.Memory.peekShort(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
@@ -1015,7 +1005,7 @@
// int libcore.io.Memory.peekInt(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1028,7 +1018,7 @@
// long libcore.io.Memory.peekLong(long address)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1039,17 +1029,16 @@
__ Ld(out, adr, 0);
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
}
// void libcore.io.Memory.pokeByte(long address, byte value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1062,7 +1051,7 @@
// void libcore.io.Memory.pokeShort(long address, short value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1075,7 +1064,7 @@
// void libcore.io.Memory.pokeInt(long address, int value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1088,7 +1077,7 @@
// void libcore.io.Memory.pokeLong(long address, long value)
void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1101,9 +1090,8 @@
// Thread java.lang.Thread.currentThread()
void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -1117,17 +1105,18 @@
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -1227,7 +1216,7 @@
// int sun.misc.Unsafe.getInt(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1236,7 +1225,7 @@
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
@@ -1245,7 +1234,7 @@
// long sun.misc.Unsafe.getLong(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
@@ -1254,7 +1243,7 @@
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
@@ -1263,7 +1252,7 @@
// Object sun.misc.Unsafe.getObject(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
@@ -1272,17 +1261,16 @@
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1341,7 +1329,7 @@
// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
@@ -1354,7 +1342,7 @@
// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
@@ -1367,7 +1355,7 @@
// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
@@ -1380,7 +1368,7 @@
// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
@@ -1393,7 +1381,7 @@
// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
@@ -1406,7 +1394,7 @@
// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
@@ -1419,7 +1407,7 @@
// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
@@ -1432,7 +1420,7 @@
// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
@@ -1445,7 +1433,7 @@
// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoid(arena_, invoke);
+ CreateIntIntIntIntToVoid(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
@@ -1456,15 +1444,16 @@
codegen_);
}
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -1583,7 +1572,7 @@
// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -1592,7 +1581,7 @@
// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
@@ -1607,7 +1596,7 @@
return;
}
- CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
+ CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -1620,9 +1609,8 @@
// int java.lang.String.compareTo(String anotherString)
void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1648,9 +1636,8 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
@@ -1814,9 +1801,8 @@
// int java.lang.String.indexOf(int ch)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1835,9 +1821,8 @@
// int java.lang.String.indexOf(int ch, int fromIndex)
void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
// We have a hand-crafted assembly stub that follows the runtime
// calling convention. So it's best to align the inputs accordingly.
InvokeRuntimeCallingConvention calling_convention;
@@ -1855,9 +1840,8 @@
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1883,9 +1867,8 @@
// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1907,9 +1890,8 @@
// java.lang.StringFactory.newStringFromString(String toCopy)
void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
@@ -1948,7 +1930,7 @@
// boolean java.lang.Float.isInfinite(float)
void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
@@ -1957,7 +1939,7 @@
// boolean java.lang.Double.isInfinite(double)
void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -1966,9 +1948,8 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2083,7 +2064,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -2277,7 +2258,7 @@
// int java.lang.Integer.highestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2286,7 +2267,7 @@
// long java.lang.Long.highestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2311,7 +2292,7 @@
// int java.lang.Integer.lowestOneBit(int)
void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2320,27 +2301,25 @@
// long java.lang.Long.lowestOneBit(long)
void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
@@ -2376,7 +2355,7 @@
// static double java.lang.Math.cos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
@@ -2385,7 +2364,7 @@
// static double java.lang.Math.sin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
@@ -2394,7 +2373,7 @@
// static double java.lang.Math.acos(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
@@ -2403,7 +2382,7 @@
// static double java.lang.Math.asin(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
@@ -2412,7 +2391,7 @@
// static double java.lang.Math.atan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
@@ -2421,7 +2400,7 @@
// static double java.lang.Math.atan2(double y, double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
@@ -2430,7 +2409,7 @@
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
@@ -2439,7 +2418,7 @@
// static double java.lang.Math.cosh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
@@ -2448,7 +2427,7 @@
// static double java.lang.Math.exp(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
@@ -2457,7 +2436,7 @@
// static double java.lang.Math.expm1(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
@@ -2466,7 +2445,7 @@
// static double java.lang.Math.hypot(double x, double y)
void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
@@ -2475,7 +2454,7 @@
// static double java.lang.Math.log(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
@@ -2484,7 +2463,7 @@
// static double java.lang.Math.log10(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
@@ -2493,7 +2472,7 @@
// static double java.lang.Math.nextAfter(double start, double direction)
void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
@@ -2502,7 +2481,7 @@
// static double java.lang.Math.sinh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
@@ -2511,7 +2490,7 @@
// static double java.lang.Math.tan(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
@@ -2520,7 +2499,7 @@
// static double java.lang.Math.tanh(double x)
void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6880a25..6085c7b 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -50,7 +50,7 @@
private:
CodeGeneratorMIPS64* codegen_;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
};
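
The same mechanical rewrite now repeats for x86 below. As a point of reference for the idiom being renamed, here is a minimal, self-contained C++ sketch of the placement-new-on-arena pattern these location-factory helpers share. Every type body in it is a simplified stand-in written for illustration; only the names (ArenaAllocator, LocationSummary, HInvoke, kIntrinsified) come from the hunks above, and the real ART declarations differ.

// Sketch only: stand-in types, not the real ART headers.
#include <cstddef>
#include <new>

class ArenaAllocator {
 public:
  // Stand-in: the real arena bump-allocates pooled memory and releases it
  // wholesale when compilation of the method finishes.
  void* Alloc(size_t bytes) { return ::operator new(bytes); }
};

// Placement-new hook so `new (allocator) T(...)` routes through the arena,
// mirroring the `new (allocator_) LocationSummary(...)` calls in the diff.
inline void* operator new(size_t bytes, ArenaAllocator* allocator) {
  return allocator->Alloc(bytes);
}
inline void operator delete(void*, ArenaAllocator*) {}  // matching form, no-op

class HInvoke {};
constexpr bool kIntrinsified = true;

class LocationSummary {
 public:
  enum CallKind { kNoCall, kCallOnSlowPath, kCallOnMainOnly, kCallOnMainAndSlowPath };
  LocationSummary(HInvoke* invoke, CallKind kind, bool intrinsified)
      : invoke_(invoke), kind_(kind), intrinsified_(intrinsified) {}

 private:
  HInvoke* invoke_;
  CallKind kind_;
  bool intrinsified_;
};

// Post-change shape of a typical helper: the parameter is named `allocator`
// and the constructor call is folded onto one wrapped statement. The real
// helpers return void and go on to pin inputs/outputs via SetInAt()/SetOut().
static LocationSummary* CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  return new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
}

int main() {
  ArenaAllocator allocator;
  HInvoke invoke;
  LocationSummary* locations = CreateIntToIntLocations(&allocator, &invoke);
  (void)locations;  // intentionally not deleted: arena memory is reclaimed in bulk
}

The rename tracks the accessor change visible below (`GetGraph()->GetArena()` becoming `GetGraph()->GetAllocator()`): the object is still an arena, but call sites now name it for its role rather than its implementation.
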
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index a591622..8b389ba 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -46,7 +46,7 @@
static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
- : arena_(codegen->GetGraph()->GetArena()),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen) {
}
@@ -56,7 +56,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
@@ -175,10 +175,9 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
if (is64bit) {
@@ -186,10 +185,9 @@
}
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is64bit) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
if (is64bit) {
@@ -230,10 +228,10 @@
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -244,10 +242,10 @@
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -257,26 +255,23 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -302,7 +297,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -310,7 +305,7 @@
}
void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
@@ -331,7 +326,7 @@
}
void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
@@ -342,11 +337,10 @@
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloat(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
@@ -401,7 +395,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
@@ -409,17 +403,16 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloat(arena_, invoke);
+ CreateFloatToFloat(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke, /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsIntLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RegisterLocation(EDX));
@@ -444,10 +437,9 @@
// The result is in EAX.
}
-static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateAbsLongLocation(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
locations->AddTemp(Location::RequiresRegister());
@@ -480,7 +472,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
- CreateAbsIntLocation(arena_, invoke);
+ CreateAbsIntLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
@@ -488,7 +480,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
- CreateAbsLongLocation(arena_, invoke);
+ CreateAbsLongLocation(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
@@ -598,10 +590,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -616,7 +607,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -628,7 +619,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -640,7 +631,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -652,7 +643,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFPLocations(arena_, invoke);
+ CreateFPFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -718,19 +709,17 @@
}
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -739,7 +728,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
@@ -747,7 +736,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
@@ -755,7 +744,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -763,23 +752,22 @@
}
void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateLongLongToLongLocations(arena_, invoke);
+ CreateLongLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
@@ -805,18 +793,18 @@
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -839,7 +827,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
@@ -847,7 +835,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
@@ -855,7 +843,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
@@ -867,9 +855,8 @@
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
DCHECK(static_or_direct != nullptr);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
if (static_or_direct->HasSpecialInput() &&
invoke->InputAt(
@@ -883,8 +870,8 @@
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -951,11 +938,9 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -992,7 +977,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCos(HInvoke* invoke) {
@@ -1000,7 +985,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSin(HInvoke* invoke) {
@@ -1008,7 +993,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAcos(HInvoke* invoke) {
@@ -1016,7 +1001,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAsin(HInvoke* invoke) {
@@ -1024,7 +1009,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan(HInvoke* invoke) {
@@ -1032,7 +1017,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCbrt(HInvoke* invoke) {
@@ -1040,7 +1025,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathCosh(HInvoke* invoke) {
@@ -1048,7 +1033,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExp(HInvoke* invoke) {
@@ -1056,7 +1041,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathExpm1(HInvoke* invoke) {
@@ -1064,7 +1049,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog(HInvoke* invoke) {
@@ -1072,7 +1057,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathLog10(HInvoke* invoke) {
@@ -1080,7 +1065,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathSinh(HInvoke* invoke) {
@@ -1088,7 +1073,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTan(HInvoke* invoke) {
@@ -1096,18 +1081,16 @@
}
void IntrinsicLocationsBuilderX86::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -1115,7 +1098,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
@@ -1123,7 +1106,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathHypot(HInvoke* invoke) {
@@ -1131,7 +1114,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMathNextAfter(HInvoke* invoke) {
@@ -1174,7 +1157,7 @@
// Okay, it is safe to generate inline code.
LocationSummary* locations =
- new (arena_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1336,9 +1319,8 @@
void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
// The inputs plus one temp.
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1363,9 +1345,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1655,7 +1636,7 @@
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
@@ -1663,7 +1644,7 @@
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1672,9 +1653,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1699,9 +1679,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1721,9 +1700,8 @@
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(EAX));
@@ -1746,9 +1724,8 @@
void IntrinsicLocationsBuilderX86::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
// Place srcEnd in ECX to save a move below.
@@ -1875,7 +1852,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -1883,7 +1860,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -1891,7 +1868,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateLongToLongLocations(arena_, invoke);
+ CreateLongToLongLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -1899,18 +1876,18 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateLongToIntLocations(arena_, invoke);
+ CreateLongToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateLongIntToVoidLocations(ArenaAllocator* arena, DataType::Type size,
+static void CreateLongIntToVoidLocations(ArenaAllocator* allocator,
+ DataType::Type size,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
HInstruction* value = invoke->InputAt(1);
if (size == DataType::Type::kInt8) {
@@ -1967,7 +1944,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt8, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt8, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -1975,7 +1952,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -1983,7 +1960,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -1991,7 +1968,7 @@
}
void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateLongIntToVoidLocations(arena_, DataType::Type::kInt16, invoke);
+ CreateLongIntToVoidLocations(allocator_, DataType::Type::kInt16, invoke);
}
void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -1999,9 +1976,8 @@
}
void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2071,18 +2047,19 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
DataType::Type type,
bool is_volatile) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2104,23 +2081,26 @@
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- arena_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
}
@@ -2144,13 +2124,12 @@
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke,
bool is_volatile) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2168,39 +2147,39 @@
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- arena_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2282,17 +2261,18 @@
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
// Offset is a long, but in 32 bit mode, we only need the low word.
@@ -2320,11 +2300,11 @@
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2334,7 +2314,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -2473,9 +2453,8 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2516,9 +2495,8 @@
}
void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2553,15 +2531,14 @@
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
+ ArenaAllocator* allocator, CodeGeneratorX86* codegen, HInvoke* invoke, bool is_long) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->AddTemp(Location::RequiresRegister());
}
@@ -2610,7 +2587,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2618,17 +2595,16 @@
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2715,7 +2691,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2723,17 +2699,16 @@
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
if (is_long) {
locations->SetInAt(0, Location::RequiresRegister());
} else {
@@ -2807,7 +2782,7 @@
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2815,7 +2790,7 @@
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3352,9 +3327,8 @@
}
void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
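
Every hunk in intrinsics_x86.cc above has the same shape: a Create*Locations helper now takes a parameter named allocator instead of arena, and the LocationSummary is still created with placement new into that allocator. The reason `new (allocator_) LocationSummary(...)` is well-formed is a class-level operator new overload that forwards to the arena. A minimal, self-contained sketch of that pattern — every name below is an illustrative stand-in, not ART's real type:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy bump allocator standing in for ART's ArenaAllocator.
class Arena {
 public:
  explicit Arena(size_t capacity)
      : buffer_(new uint8_t[capacity]), capacity_(capacity) {}
  ~Arena() { delete[] buffer_; }  // One free reclaims every object at once.

  void* Alloc(size_t bytes) {
    // Round sizes up so every allocation stays maximally aligned.
    bytes = (bytes + alignof(std::max_align_t) - 1) &
            ~(alignof(std::max_align_t) - 1);
    assert(used_ + bytes <= capacity_);
    void* result = buffer_ + used_;
    used_ += bytes;
    return result;
  }

 private:
  uint8_t* buffer_;
  size_t capacity_;
  size_t used_ = 0;
};

// Stand-in for LocationSummary: this operator new overload is the one piece
// that makes the `new (allocator) T(...)` spelling in the diff compile.
class SummaryLike {
 public:
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  explicit SummaryLike(int call_kind) : call_kind_(call_kind) {}

 private:
  int call_kind_;
};

int main() {
  Arena arena(/* capacity */ 1024);
  SummaryLike* summary = new (&arena) SummaryLike(/* call_kind */ 0);
  (void)summary;  // Never deleted individually; its storage dies with the arena.
}

The practical consequence, visible throughout the diff, is that no VisitX method ever deletes a LocationSummary: the graph's allocator owns all of them.
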
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index 22f11b1..ba3ca0a 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -49,7 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86);
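
The header change is just the field rename. DISALLOW_COPY_AND_ASSIGN is ART's usual copy-suppression macro from its base macros header; a minimal equivalent, shown here as a sketch rather than the exact ART definition:

#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  void operator=(const TypeName&) = delete

class Builder {
 public:
  Builder() = default;

 private:
  int state_ = 0;
  DISALLOW_COPY_AND_ASSIGN(Builder);
};

int main() {
  Builder a;
  // Builder b = a;  // Would not compile: the copy constructor is deleted.
  (void)a;
}

With `= delete` the private placement is no longer strictly necessary, but keeping the macro at the end of the private section is the established house style.
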
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index a2545ee..6337900 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@
namespace x86_64 {
IntrinsicLocationsBuilderX86_64::IntrinsicLocationsBuilderX86_64(CodeGeneratorX86_64* codegen)
- : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
+ : allocator_(codegen->GetGraph()->GetAllocator()), codegen_(codegen) {
}
X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
@@ -49,7 +49,7 @@
}
ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
- return codegen_->GetGraph()->GetArena();
+ return codegen_->GetGraph()->GetAllocator();
}
bool IntrinsicLocationsBuilderX86_64::TryDispatch(HInvoke* invoke) {
@@ -128,18 +128,16 @@
#define __ assembler->
-static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
}
-static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
@@ -157,10 +155,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
@@ -171,10 +169,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(arena_, invoke);
+ CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(arena_, invoke);
+ CreateIntToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -184,10 +182,9 @@
MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
-static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -216,7 +213,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerReverseBytes(HInvoke* invoke) {
@@ -224,7 +221,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongReverseBytes(HInvoke* invoke) {
@@ -232,7 +229,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitShortReverseBytes(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitShortReverseBytes(HInvoke* invoke) {
@@ -243,11 +240,10 @@
// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
// need is 64b.
-static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFloatToFloatPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
// TODO: Enable memory operations when the assembler supports them.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresFpuRegister()); // FP reg to hold mask.
@@ -275,7 +271,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsDouble(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsDouble(HInvoke* invoke) {
@@ -283,17 +279,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsFloat(HInvoke* invoke) {
- CreateFloatToFloatPlusTemps(arena_, invoke);
+ CreateFloatToFloatPlusTemps(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsFloat(HInvoke* invoke) {
MathAbsFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler(), codegen_);
}
-static void CreateIntToIntPlusTemp(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -322,7 +317,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsInt(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsInt(HInvoke* invoke) {
@@ -330,7 +325,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAbsLong(HInvoke* invoke) {
- CreateIntToIntPlusTemp(arena_, invoke);
+ CreateIntToIntPlusTemp(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAbsLong(HInvoke* invoke) {
@@ -421,10 +416,9 @@
__ Bind(&done);
}
-static void CreateFPFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPFPToFP(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetInAt(1, Location::RequiresFpuRegister());
// The following is sub-optimal, but all we can do for now. It would be fine to also accept
@@ -433,7 +427,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
@@ -442,7 +436,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
@@ -451,7 +445,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
@@ -460,7 +454,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
- CreateFPFPToFP(arena_, invoke);
+ CreateFPFPToFP(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
@@ -500,17 +494,16 @@
__ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
}
-static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
@@ -518,7 +511,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
@@ -526,7 +519,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
@@ -534,23 +527,22 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
- CreateIntIntToIntLocations(arena_, invoke);
+ CreateIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
}
-static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
}
void IntrinsicLocationsBuilderX86_64::VisitMathSqrt(HInvoke* invoke) {
- CreateFPToFPLocations(arena_, invoke);
+ CreateFPToFPLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) {
@@ -576,18 +568,18 @@
}
}
-static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToFPLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- CreateFPToFPLocations(arena, invoke);
+ CreateFPToFPLocations(allocator, invoke);
return;
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -610,7 +602,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCeil(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCeil(HInvoke* invoke) {
@@ -618,7 +610,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathFloor(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathFloor(HInvoke* invoke) {
@@ -626,21 +618,20 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRint(HInvoke* invoke) {
- CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToFPLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRint(HInvoke* invoke) {
GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}
-static void CreateSSE41FPToIntLocations(ArenaAllocator* arena,
- HInvoke* invoke,
- CodeGeneratorX86_64* codegen) {
+static void CreateSSE41FPToIntLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
// Do we have instruction support?
if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
@@ -649,8 +640,8 @@
}
// We have to fall back to a call to the intrinsic.
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -659,7 +650,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -703,7 +694,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ CreateSSE41FPToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -746,11 +737,9 @@
__ Bind(&done);
}
-static void CreateFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetOut(Location::FpuRegisterLocation(XMM0));
@@ -773,7 +762,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCos(HInvoke* invoke) {
@@ -781,7 +770,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathSin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSin(HInvoke* invoke) {
@@ -789,7 +778,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAcos(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAcos(HInvoke* invoke) {
@@ -797,7 +786,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAsin(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAsin(HInvoke* invoke) {
@@ -805,7 +794,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan(HInvoke* invoke) {
@@ -813,7 +802,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCbrt(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCbrt(HInvoke* invoke) {
@@ -821,7 +810,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathCosh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathCosh(HInvoke* invoke) {
@@ -829,7 +818,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathExp(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExp(HInvoke* invoke) {
@@ -837,7 +826,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathExpm1(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathExpm1(HInvoke* invoke) {
@@ -845,7 +834,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog(HInvoke* invoke) {
@@ -853,7 +842,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathLog10(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathLog10(HInvoke* invoke) {
@@ -861,7 +850,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathSinh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathSinh(HInvoke* invoke) {
@@ -869,7 +858,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathTan(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTan(HInvoke* invoke) {
@@ -877,18 +866,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathTanh(HInvoke* invoke) {
- CreateFPToFPCallLocations(arena_, invoke);
+ CreateFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
-static void CreateFPFPToFPCallLocations(ArenaAllocator* arena,
- HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
@@ -903,7 +890,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathAtan2(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
@@ -911,7 +898,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathHypot(HInvoke* invoke) {
@@ -919,7 +906,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathNextAfter(HInvoke* invoke) {
- CreateFPFPToFPCallLocations(arena_, invoke);
+ CreateFPFPToFPCallLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMathNextAfter(HInvoke* invoke) {
@@ -949,9 +936,8 @@
}
}
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnSlowPath,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
// arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
@@ -1507,9 +1493,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1534,9 +1519,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1812,7 +1796,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
@@ -1820,7 +1804,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, arena_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1829,9 +1813,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1856,9 +1839,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
@@ -1878,9 +1860,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainAndSlowPath,
- kIntrinsified);
+ LocationSummary* locations = new (allocator_) LocationSummary(
+ invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetOut(Location::RegisterLocation(RAX));
@@ -1903,9 +1884,8 @@
void IntrinsicLocationsBuilderX86_64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
// public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin);
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
@@ -2018,7 +1998,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -2026,7 +2006,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekIntNative(HInvoke* invoke) {
@@ -2034,7 +2014,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekLongNative(HInvoke* invoke) {
@@ -2042,17 +2022,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
- CreateIntToIntLocations(arena_, invoke);
+ CreateIntToIntLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPeekShortNative(HInvoke* invoke) {
GenPeek(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
}
-static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrInt32Constant(invoke->InputAt(1)));
}
@@ -2104,7 +2083,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeByte(HInvoke* invoke) {
@@ -2112,7 +2091,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeIntNative(HInvoke* invoke) {
@@ -2120,7 +2099,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeLongNative(HInvoke* invoke) {
@@ -2128,7 +2107,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
- CreateIntIntToVoidLocations(arena_, invoke);
+ CreateIntIntToVoidLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitMemoryPokeShortNative(HInvoke* invoke) {
@@ -2136,9 +2115,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
@@ -2194,15 +2172,16 @@
}
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
@@ -2214,22 +2193,22 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(arena_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke);
}
@@ -2253,12 +2232,11 @@
}
-static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
+static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2271,31 +2249,31 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePut(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObject(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kReference, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- CreateIntIntIntIntToVoidPlusTempsLocations(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntToVoidPlusTempsLocations(allocator_, DataType::Type::kInt64, invoke);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -2363,17 +2341,18 @@
GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
}
-static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
+static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
bool can_call = kEmitCompilerReadBarrier &&
kUseBakerReadBarrier &&
(invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- (can_call
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall),
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke,
+ can_call
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall,
+ kIntrinsified);
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
@@ -2391,11 +2370,11 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
@@ -2405,7 +2384,7 @@
return;
}
- CreateIntIntIntIntIntToInt(arena_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
}
static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
@@ -2537,9 +2516,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2580,9 +2558,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongReverse(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
locations->AddTemp(Location::RequiresRegister());
@@ -2625,15 +2602,14 @@
}
static void CreateBitCountLocations(
- ArenaAllocator* arena, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
+ ArenaAllocator* allocator, CodeGeneratorX86_64* codegen, HInvoke* invoke) {
if (!codegen->GetInstructionSetFeatures().HasPopCnt()) {
// Do nothing if there is no popcnt support. This results in generating
// a call for the intrinsic rather than direct code.
return;
}
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2672,7 +2648,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
@@ -2680,17 +2656,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(arena_, codegen_, invoke);
+ CreateBitCountLocations(allocator_, codegen_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateOneBitLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_high) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
locations->AddTemp(is_high ? Location::RegisterLocation(RCX) // needs CL
@@ -2787,7 +2762,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
@@ -2795,7 +2770,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
@@ -2803,7 +2778,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
@@ -2811,17 +2786,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(arena_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
}
-static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2877,7 +2851,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2885,17 +2859,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(arena_, invoke);
+ CreateLeadingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
}
-static void CreateTrailingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) {
- LocationSummary* locations = new (arena) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresRegister());
}
@@ -2946,7 +2919,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2954,7 +2927,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(arena_, invoke);
+ CreateTrailingZeroLocations(allocator_, invoke);
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -3029,9 +3002,8 @@
}
void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) {
- LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kNoCall,
- kIntrinsified);
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetOut(Location::RequiresRegister());
}
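Every x86-64 hunk above follows the same shape: a Visit* locations-builder method delegates to a Create*Locations helper that placement-news a LocationSummary on the pass-owned allocator. A minimal sketch of that idiom, using only the calls shown in the diff (VisitHypotheticalIntrinsic is an invented name for illustration):

static void CreateDefaultLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  // Placement-new on the arena: the summary is released with the arena,
  // never deleted individually.
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::Any());           // input may live anywhere
  locations->SetOut(Location::RequiresRegister());  // result must land in a register
}

void IntrinsicLocationsBuilderX86_64::VisitHypotheticalIntrinsic(HInvoke* invoke) {
  CreateDefaultLocations(allocator_, invoke);  // allocator_ is the renamed arena_ field
}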
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 4b28788..b0fbe91 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -49,7 +49,7 @@
bool TryDispatch(HInvoke* invoke);
private:
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
CodeGeneratorX86_64* codegen_;
DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderX86_64);
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 10524b0..7af1a20 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -84,10 +84,10 @@
// Only used in debug builds.
ArenaBitVector* visited = nullptr;
if (kIsDebugBuild) {
- visited = new (graph_->GetArena()) ArenaBitVector(graph_->GetArena(),
- graph_->GetBlocks().size(),
- false,
- kArenaAllocLICM);
+ visited = new (graph_->GetAllocator()) ArenaBitVector(graph_->GetAllocator(),
+ graph_->GetBlocks().size(),
+ false,
+ kArenaAllocLICM);
}
// Post order visit to visit inner loops before outer loops.
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 0617e60..adc3cab 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -27,12 +27,10 @@
/**
* Fixture class for the LICM tests.
*/
-class LICMTest : public CommonCompilerTest {
+class LICMTest : public OptimizingUnitTest {
public:
LICMTest()
- : pool_(),
- allocator_(&pool_),
- entry_(nullptr),
+ : entry_(nullptr),
loop_preheader_(nullptr),
loop_header_(nullptr),
loop_body_(nullptr),
@@ -41,7 +39,7 @@
parameter_(nullptr),
int_constant_(nullptr),
float_constant_(nullptr) {
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph();
}
~LICMTest() { }
@@ -49,12 +47,12 @@
// Builds a singly-nested loop structure in the CFG. Tests can further populate
// the basic blocks with instructions to set up interesting scenarios.
void BuildLoop() {
- entry_ = new (&allocator_) HBasicBlock(graph_);
- loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
- loop_header_ = new (&allocator_) HBasicBlock(graph_);
- loop_body_ = new (&allocator_) HBasicBlock(graph_);
- return_ = new (&allocator_) HBasicBlock(graph_);
- exit_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_preheader_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_header_ = new (GetAllocator()) HBasicBlock(graph_);
+ loop_body_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->AddBlock(loop_preheader_);
@@ -75,18 +73,18 @@
return_->AddSuccessor(exit_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
entry_->AddInstruction(parameter_);
int_constant_ = graph_->GetIntConstant(42);
float_constant_ = graph_->GetFloatConstant(42.0f);
- loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
- loop_body_->AddInstruction(new (&allocator_) HGoto());
- return_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_->AddInstruction(new (&allocator_) HExit());
+ loop_preheader_->AddInstruction(new (GetAllocator()) HGoto());
+ loop_header_->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ loop_body_->AddInstruction(new (GetAllocator()) HGoto());
+ return_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_->AddInstruction(new (GetAllocator()) HExit());
}
// Performs LICM optimizations (after proper set up).
@@ -98,8 +96,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
// Specific basic blocks.
@@ -123,17 +119,17 @@
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(
parameter_, int_constant_, nullptr, DataType::Type::kInt32, MemberOffset(20),
false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
@@ -150,26 +146,26 @@
// Populate the loop with instructions: set/get field with same types.
ScopedNullHandle<mirror::DexCache> dex_cache;
- HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* get_field = new (GetAllocator()) HInstanceFieldGet(parameter_,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
- HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
- get_field,
- nullptr,
- DataType::Type::kInt64,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* set_field = new (GetAllocator()) HInstanceFieldSet(parameter_,
+ get_field,
+ nullptr,
+ DataType::Type::kInt64,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -183,10 +179,10 @@
BuildLoop();
// Populate the loop with instructions: set/get array with different types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kInt32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, int_constant_, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
@@ -201,10 +197,10 @@
BuildLoop();
// Populate the loop with instructions: set/get array with same types.
- HInstruction* get_array = new (&allocator_) HArrayGet(
+ HInstruction* get_array = new (GetAllocator()) HArrayGet(
parameter_, int_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
- HInstruction* set_array = new (&allocator_) HArraySet(
+ HInstruction* set_array = new (GetAllocator()) HArraySet(
parameter_, get_array, float_constant_, DataType::Type::kFloat32, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
diff --git a/compiler/optimizing/linear_order.cc b/compiler/optimizing/linear_order.cc
index 80cecd4..58e00a8 100644
--- a/compiler/optimizing/linear_order.cc
+++ b/compiler/optimizing/linear_order.cc
@@ -16,6 +16,9 @@
#include "linear_order.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
+
namespace art {
static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
@@ -34,7 +37,8 @@
}
// Helper method to update work list for linear order.
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+static void AddToListForLinearization(ScopedArenaVector<HBasicBlock*>* worklist,
+ HBasicBlock* block) {
HLoopInformation* block_loop = block->GetLoopInformation();
auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
@@ -51,7 +55,7 @@
}
// Helper method to validate linear order.
-static bool IsLinearOrderWellFormed(const HGraph* graph, ArenaVector<HBasicBlock*>* linear_order) {
+static bool IsLinearOrderWellFormed(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
for (HBasicBlock* header : graph->GetBlocks()) {
if (header == nullptr || !header->IsLoopHeader()) {
continue;
@@ -59,7 +63,7 @@
HLoopInformation* loop = header->GetLoopInformation();
size_t num_blocks = loop->GetBlocks().NumSetBits();
size_t found_blocks = 0u;
- for (HBasicBlock* block : *linear_order) {
+ for (HBasicBlock* block : linear_order) {
if (loop->Contains(*block)) {
found_blocks++;
if (found_blocks == 1u && block != header) {
@@ -79,10 +83,8 @@
return true;
}
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order) {
- DCHECK(linear_order->empty());
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order) {
+ DCHECK_EQ(linear_order.size(), graph->GetReversePostOrder().size());
// Create a reverse post ordering with the following properties:
// - Blocks in a loop are consecutive,
// - Back-edge is the last block before loop exits.
@@ -92,8 +94,9 @@
// current reverse post order in the graph, but it would require making
// order queries to a GrowableArray, which is not the best data structure
// for it.
- ArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
- allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaAllocator allocator(graph->GetArenaStack());
+ ScopedArenaVector<uint32_t> forward_predecessors(graph->GetBlocks().size(),
+ allocator.Adapter(kArenaAllocLinearOrder));
for (HBasicBlock* block : graph->GetReversePostOrder()) {
size_t number_of_forward_predecessors = block->GetPredecessors().size();
if (block->IsLoopHeader()) {
@@ -105,13 +108,14 @@
// iterate over the successors. When all non-back edge predecessors of a
// successor block are visited, the successor block is added to the worklist
// following an order that satisfies the requirements to build our linear graph.
- linear_order->reserve(graph->GetReversePostOrder().size());
- ArenaVector<HBasicBlock*> worklist(allocator->Adapter(kArenaAllocLinearOrder));
+ ScopedArenaVector<HBasicBlock*> worklist(allocator.Adapter(kArenaAllocLinearOrder));
worklist.push_back(graph->GetEntryBlock());
+ size_t num_added = 0u;
do {
HBasicBlock* current = worklist.back();
worklist.pop_back();
- linear_order->push_back(current);
+ linear_order[num_added] = current;
+ ++num_added;
for (HBasicBlock* successor : current->GetSuccessors()) {
int block_id = successor->GetBlockId();
size_t number_of_remaining_predecessors = forward_predecessors[block_id];
@@ -121,6 +125,7 @@
forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
}
} while (!worklist.empty());
+ DCHECK_EQ(num_added, linear_order.size());
DCHECK(graph->HasIrreducibleLoops() || IsLinearOrderWellFormed(graph, linear_order));
}
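For reference, the loop above is Kahn's topological sort restricted to forward edges, with AddToListForLinearization picking the worklist position so loop blocks stay contiguous. A stripped-down sketch of the counting scheme with plain std containers and a simple LIFO worklist (the loop-aware insertion is omitted; block ids and the exclusion of back edges from the predecessor counts are assumed):

#include <cstddef>
#include <vector>

std::vector<size_t> Linearize(const std::vector<std::vector<size_t>>& succs,
                              std::vector<size_t> forward_pred_count,  // back edges excluded
                              size_t entry) {
  std::vector<size_t> order;
  std::vector<size_t> worklist{entry};
  while (!worklist.empty()) {
    size_t current = worklist.back();
    worklist.pop_back();
    order.push_back(current);
    for (size_t successor : succs[current]) {
      if (--forward_pred_count[successor] == 0u) {
        worklist.push_back(successor);  // all forward predecessors are now visited
      }
    }
  }
  return order;
}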
diff --git a/compiler/optimizing/linear_order.h b/compiler/optimizing/linear_order.h
index 7122d67..151db00 100644
--- a/compiler/optimizing/linear_order.h
+++ b/compiler/optimizing/linear_order.h
@@ -17,10 +17,14 @@
#ifndef ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
#define ART_COMPILER_OPTIMIZING_LINEAR_ORDER_H_
+#include <type_traits>
+
#include "nodes.h"
namespace art {
+void LinearizeGraphInternal(const HGraph* graph, ArrayRef<HBasicBlock*> linear_order);
+
// Linearizes the 'graph' such that:
// (1): a block is always after its dominator,
// (2): blocks of loops are contiguous.
@@ -32,9 +36,15 @@
//
// for (HBasicBlock* block : ReverseRange(linear_order)) // linear post order
//
-void LinearizeGraph(const HGraph* graph,
- ArenaAllocator* allocator,
- ArenaVector<HBasicBlock*>* linear_order);
+template <typename Vector>
+void LinearizeGraph(const HGraph* graph, Vector* linear_order) {
+ static_assert(std::is_same<HBasicBlock*, typename Vector::value_type>::value,
+ "Vector::value_type must be HBasicBlock*.");
+ // Resize the vector and pass an ArrayRef<> to the internal implementation, which is
+ // shared for all kinds of vectors, e.g. ArenaVector<> or ScopedArenaVector<>.
+ linear_order->resize(graph->GetReversePostOrder().size());
+ LinearizeGraphInternal(graph, ArrayRef<HBasicBlock*>(*linear_order));
+}
} // namespace art
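A hedged usage sketch of the new template, mirroring the call pattern visible in HLoopOptimization::LocalRun() below; a caller owning `allocator` is assumed:

ScopedArenaVector<HBasicBlock*> linear_order(allocator.Adapter(kArenaAllocLinearOrder));
LinearizeGraph(graph, &linear_order);  // resizes, then fills via LinearizeGraphInternal()
for (HBasicBlock* block : linear_order) {
  // Blocks arrive with dominators first and loop bodies contiguous.
}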
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 3831aa6..b2a9c0a 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -32,17 +32,20 @@
namespace art {
-class LinearizeTest : public CommonCompilerTest {};
+class LinearizeTest : public OptimizingUnitTest {
+ protected:
+ template <size_t number_of_blocks>
+ void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]);
+};
template <size_t number_of_blocks>
-static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LinearizeTest::TestCode(const uint16_t* data,
+ const uint32_t (&expected_order)[number_of_blocks]) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks);
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index 405f261..c60386d 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -23,29 +23,29 @@
namespace art {
TEST(LiveInterval, GetStart) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(0u, interval->GetStart());
}
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_EQ(4u, interval->GetStart());
}
}
TEST(LiveInterval, IsDeadAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(42));
ASSERT_TRUE(interval->IsDeadAt(43));
ASSERT_FALSE(interval->IsDeadAt(41));
@@ -55,7 +55,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->IsDeadAt(16));
ASSERT_TRUE(interval->IsDeadAt(32));
ASSERT_FALSE(interval->IsDeadAt(0));
@@ -68,12 +68,12 @@
}
TEST(LiveInterval, Covers) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges[][2] = {{0, 42}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_TRUE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(41));
@@ -83,7 +83,7 @@
{
static constexpr size_t ranges[][2] = {{4, 12}, {14, 16}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
ASSERT_FALSE(interval->Covers(0));
ASSERT_TRUE(interval->Covers(4));
ASSERT_TRUE(interval->Covers(11));
@@ -96,68 +96,68 @@
}
TEST(LiveInterval, FirstIntersectionWith) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 42}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(8u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {11, 12}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(kNoLifetime, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 4}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{5, 6}, {7, 8}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(9u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 7}, {8, 10}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {6, 7}, {9, 10}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(6u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {55, 58}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 42}, {43, 48}, {54, 56}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(55u, interval1->FirstIntersectionWith(interval2));
}
{
static constexpr size_t ranges1[][2] = {{0, 1}, {2, 8}, {15, 18}, {27, 32}, {41, 53}, {54, 60}};
- LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), &allocator);
+ LiveInterval* interval1 = BuildInterval(ranges1, arraysize(ranges1), allocator);
static constexpr size_t ranges2[][2] = {{1, 2}, {11, 12}, {19, 25}, {34, 42}, {52, 60}};
- LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), &allocator);
+ LiveInterval* interval2 = BuildInterval(ranges2, arraysize(ranges2), allocator);
ASSERT_EQ(41u, interval1->FirstIntersectionWith(interval2));
}
@@ -188,13 +188,13 @@
}
TEST(LiveInterval, SplitAt) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test within one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -205,7 +205,7 @@
{
// Test just before the end of one range.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(3);
static constexpr size_t expected[][2] = {{0, 3}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -216,7 +216,7 @@
{
// Test within the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(1);
static constexpr size_t expected[][2] = {{0, 1}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -227,7 +227,7 @@
{
// Test in a hole.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -238,7 +238,7 @@
{
// Test within the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {8, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(9);
static constexpr size_t expected[][2] = {{0, 4}, {8, 9}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -249,7 +249,7 @@
{
// Test at the beginning of the second range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(6);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -260,7 +260,7 @@
{
// Test at the end of the first range.
static constexpr size_t ranges[][2] = {{0, 4}, {6, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(4);
static constexpr size_t expected[][2] = {{0, 4}};
ASSERT_TRUE(RangesEquals(interval, expected, arraysize(expected)));
@@ -271,7 +271,7 @@
{
// Test that we get null if we split at a position where the interval is dead.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
LiveInterval* split = interval->SplitAt(5);
ASSERT_TRUE(split == nullptr);
ASSERT_TRUE(RangesEquals(interval, ranges, arraysize(ranges)));
@@ -279,13 +279,13 @@
}
TEST(LiveInterval, AddLoopRange) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+ ArenaPoolAndAllocator pool;
+ ScopedArenaAllocator* allocator = pool.GetScopedAllocator();
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{0, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -296,7 +296,7 @@
{
// Test when only used in a loop.
static constexpr size_t ranges[][2] = {{2, 4}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -307,7 +307,7 @@
{
// Test when used just after the loop.
static constexpr size_t ranges[][2] = {{2, 4}, {8, 10}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -318,7 +318,7 @@
{
// Test when use after the loop is after a lifetime hole.
static constexpr size_t ranges[][2] = {{2, 4}, {10, 12}};
- LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), &allocator);
+ LiveInterval* interval = BuildInterval(ranges, arraysize(ranges), allocator);
interval->AddLoopRange(0, 8);
LiveRange* range = interval->GetFirstRange();
ASSERT_EQ(range->GetStart(), 0u);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index f9a955f..ddcad5a 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -29,10 +29,13 @@
namespace art {
-class LiveRangesTest : public CommonCompilerTest {};
+class LiveRangesTest : public OptimizingUnitTest {
+ public:
+ HGraph* BuildGraph(const uint16_t* data);
+};
-static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateCFG(allocator, data);
+HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
// The suspend check implementation may change in the future, and this test
// relies on how instructions are ordered.
RemoveSuspendChecks(graph);
@@ -58,14 +61,12 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -107,13 +108,11 @@
Instruction::GOTO | 0x100,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -158,13 +157,11 @@
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 4 constant.
@@ -236,14 +233,12 @@
Instruction::CONST_4 | 5 << 12 | 1 << 8,
Instruction::RETURN | 1 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
RemoveSuspendChecks(graph);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -316,13 +311,11 @@
Instruction::GOTO | 0xFB00,
Instruction::RETURN | 0 << 8);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -394,13 +387,11 @@
Instruction::ADD_INT, 1 << 8,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = BuildGraph(data, &allocator);
+ HGraph* graph = BuildGraph(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 37b58de..3eadc8f 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class LivenessTest : public CommonCompilerTest {};
+class LivenessTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
static void DumpBitVector(BitVector* vector,
std::ostream& buffer,
@@ -43,16 +46,14 @@
buffer << ")\n";
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void LivenessTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::ostringstream buffer;
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index d46b904..6a25da3 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -172,9 +172,9 @@
explicit HeapLocationCollector(HGraph* graph)
: HGraphVisitor(graph),
- ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- aliasing_matrix_(graph->GetArena(),
+ ref_info_array_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ heap_locations_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ aliasing_matrix_(graph->GetAllocator(),
kInitialAliasingMatrixBitVectorSize,
true,
kArenaAllocLSE),
@@ -362,7 +362,7 @@
ReferenceInfo* ref_info = FindReferenceInfoOf(instruction);
if (ref_info == nullptr) {
size_t pos = ref_info_array_.size();
- ref_info = new (GetGraph()->GetArena()) ReferenceInfo(instruction, pos);
+ ref_info = new (GetGraph()->GetAllocator()) ReferenceInfo(instruction, pos);
ref_info_array_.push_back(ref_info);
}
return ref_info;
@@ -385,7 +385,7 @@
size_t heap_location_idx = FindHeapLocationIndex(
ref_info, offset, index, declaring_class_def_index);
if (heap_location_idx == kHeapLocationNotFound) {
- HeapLocation* heap_loc = new (GetGraph()->GetArena())
+ HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
HeapLocation(ref_info, offset, index, declaring_class_def_index);
heap_locations_.push_back(heap_loc);
return heap_loc;
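Both hunks above use the same lazy get-or-create idiom: look the entry up, and only on a miss placement-new a fresh one on the graph's allocator, with its vector position doubling as its id. A generic, self-contained sketch of that shape (Entry, the key, and the matcher are invented; the real code allocates on the ArenaAllocator instead of the heap):

#include <cstddef>
#include <vector>

template <typename Entry, typename Key, typename Match>
Entry* GetOrCreate(std::vector<Entry*>* entries, const Key& key, Match match) {
  for (Entry* entry : *entries) {
    if (match(entry, key)) {
      return entry;  // hit: reuse the existing entry
    }
  }
  size_t pos = entries->size();        // miss: the position becomes the id
  Entry* entry = new Entry(key, pos);  // arena placement-new in the real code
  entries->push_back(entry);
  return entry;
}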
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 0df2f27..86696d0 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -22,19 +22,15 @@
namespace art {
-class LoadStoreAnalysisTest : public CommonCompilerTest {
+class LoadStoreAnalysisTest : public OptimizingUnitTest {
public:
- LoadStoreAnalysisTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ LoadStoreAnalysisTest() : graph_(CreateGraph()) { }
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
TEST_F(LoadStoreAnalysisTest, ArrayHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -48,18 +44,19 @@
// array_get2 ArrayGet [array, c2]
// array_set1 ArraySet [array, c1, c3]
// array_set2 ArraySet [array, index, c3]
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(2);
HInstruction* c3 = graph_->GetIntConstant(3);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, c1, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, c2, DataType::Type::kInt32, 0);
- HInstruction* array_set1 = new (&allocator_) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
+ HInstruction* array_get1 = new (GetAllocator()) HArrayGet(array, c1, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 = new (GetAllocator()) HArrayGet(array, c2, DataType::Type::kInt32, 0);
+ HInstruction* array_set1 =
+ new (GetAllocator()) HArraySet(array, c1, c3, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, index, c3, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
entry->AddInstruction(array_get1);
@@ -107,7 +104,7 @@
}
TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
@@ -119,38 +116,38 @@
// get_field20 InstanceFieldGet [object, 20]
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field10 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
- HInstanceFieldGet* get_field20 = new (&allocator_) HInstanceFieldGet(object,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(20),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field10 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
+ HInstanceFieldGet* get_field20 = new (GetAllocator()) HInstanceFieldGet(object,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(20),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
entry->AddInstruction(object);
entry->AddInstruction(set_field10);
entry->AddInstruction(get_field10);
@@ -186,34 +183,38 @@
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c_neg1 = graph_->GetIntConstant(-1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c1);
- HInstruction* sub_neg1 = new (&allocator_) HSub(DataType::Type::kInt32, index, c_neg1);
- HInstruction* rev_sub1 = new (&allocator_) HSub(DataType::Type::kInt32, c1, index);
- HInstruction* arr_set1 = new (&allocator_) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set2 = new (&allocator_) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set3 = new (&allocator_) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set4 = new (&allocator_) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set5 = new (&allocator_) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set6 = new (&allocator_) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c1);
+ HInstruction* sub_neg1 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c_neg1);
+ HInstruction* rev_sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, c1, index);
+ HInstruction* arr_set1 = new (GetAllocator()) HArraySet(array, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set2 = new (GetAllocator()) HArraySet(array, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set3 =
+ new (GetAllocator()) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set4 =
+ new (GetAllocator()) HArraySet(array, add1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set5 =
+ new (GetAllocator()) HArraySet(array, sub0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set6 =
+ new (GetAllocator()) HArraySet(array, sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set7 =
- new (&allocator_) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, rev_sub1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set8 =
- new (&allocator_) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, sub_neg1, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
entry->AddInstruction(index);
@@ -272,14 +273,14 @@
}
TEST_F(LoadStoreAnalysisTest, ArrayIndexCalculationOverflowTest) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
HInstruction* c0 = graph_->GetIntConstant(0);
@@ -290,40 +291,40 @@
HInstruction* c_0x80000001 = graph_->GetIntConstant(0x80000001);
// `index+0x80000000` and `index-0x80000000` array indices MAY alias.
- HInstruction* add_0x80000000 = new (&allocator_) HAdd(
+ HInstruction* add_0x80000000 = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* sub_0x80000000 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000000 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000000);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_1 = new (GetAllocator()) HArraySet(
array, add_0x80000000, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_2 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_2 = new (GetAllocator()) HArraySet(
array, sub_0x80000000, c0, DataType::Type::kInt32, 0);
// `index+0x10` and `index-0xFFFFFFF0` array indices MAY alias.
- HInstruction* add_0x10 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c_0x10);
- HInstruction* sub_0xFFFFFFF0 = new (&allocator_) HSub(
+ HInstruction* add_0x10 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c_0x10);
+ HInstruction* sub_0xFFFFFFF0 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0xFFFFFFF0);
- HInstruction* arr_set_3 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_3 = new (GetAllocator()) HArraySet(
array, add_0x10, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_4 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_4 = new (GetAllocator()) HArraySet(
array, sub_0xFFFFFFF0, c0, DataType::Type::kInt32, 0);
// `index+0x7FFFFFFF` and `index-0x80000001` array indices MAY alias.
- HInstruction* add_0x7FFFFFFF = new (&allocator_) HAdd(
+ HInstruction* add_0x7FFFFFFF = new (GetAllocator()) HAdd(
DataType::Type::kInt32, index, c_0x7FFFFFFF);
- HInstruction* sub_0x80000001 = new (&allocator_) HSub(
+ HInstruction* sub_0x80000001 = new (GetAllocator()) HSub(
DataType::Type::kInt32, index, c_0x80000001);
- HInstruction* arr_set_5 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_5 = new (GetAllocator()) HArraySet(
array, add_0x7FFFFFFF, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_6 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_6 = new (GetAllocator()) HArraySet(
array, sub_0x80000001, c0, DataType::Type::kInt32, 0);
// `index+0` and `index-0` array indices MAY alias.
- HInstruction* add_0 = new (&allocator_) HAdd(DataType::Type::kInt32, index, c0);
- HInstruction* sub_0 = new (&allocator_) HSub(DataType::Type::kInt32, index, c0);
- HInstruction* arr_set_7 = new (&allocator_) HArraySet(
+ HInstruction* add_0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
+ HInstruction* sub_0 = new (GetAllocator()) HSub(DataType::Type::kInt32, index, c0);
+ HInstruction* arr_set_7 = new (GetAllocator()) HArraySet(
array, add_0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_8 = new (&allocator_) HArraySet(
+ HInstruction* arr_set_8 = new (GetAllocator()) HArraySet(
array, sub_0, c0, DataType::Type::kInt32, 0);
entry->AddInstruction(array);
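The MAY-alias pairs above all reduce to 32-bit wraparound: the two offsets in each pair sum to 2^32, so in two's-complement arithmetic the add and the sub produce the same index. A quick standalone check of the constants used in the test:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t index = 12345u;
  assert(index + 0x80000000u == index - 0x80000000u);  // 0x80000000 * 2 == 2^32
  assert(index + 0x10u == index - 0xFFFFFFF0u);        // 0x10 + 0xFFFFFFF0 == 2^32
  assert(index + 0x7FFFFFFFu == index - 0x80000001u);  // likewise sums to 2^32
  return 0;
}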
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index bd14f2b..39bfc86 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -49,13 +49,13 @@
ArenaVector<HInstruction*>(heap_locations_collector.
GetNumberOfHeapLocations(),
kUnknownHeapValue,
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- graph->GetArena()->Adapter(kArenaAllocLSE)),
- removed_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- substitute_instructions_for_loads_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- possibly_removed_stores_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ removed_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ substitute_instructions_for_loads_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ possibly_removed_stores_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_instances_(graph->GetAllocator()->Adapter(kArenaAllocLSE)),
+ singleton_new_arrays_(graph->GetAllocator()->Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -276,6 +276,7 @@
case DataType::Type::kReference:
return GetGraph()->GetNullConstant();
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 40fe35b..5879c6f 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -28,10 +28,10 @@
LocationSummary::LocationSummary(HInstruction* instruction,
CallKind call_kind,
- bool intrinsified)
- : inputs_(instruction->InputCount(),
- instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
- temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
+ bool intrinsified,
+ ArenaAllocator* allocator)
+ : inputs_(instruction->InputCount(), allocator->Adapter(kArenaAllocLocationSummary)),
+ temps_(allocator->Adapter(kArenaAllocLocationSummary)),
call_kind_(call_kind),
intrinsified_(intrinsified),
has_custom_slow_path_calling_convention_(false),
@@ -43,11 +43,17 @@
instruction->SetLocations(this);
if (NeedsSafepoint()) {
- ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetArena();
- stack_mask_ = ArenaBitVector::Create(arena, 0, true, kArenaAllocLocationSummary);
+ stack_mask_ = ArenaBitVector::Create(allocator, 0, true, kArenaAllocLocationSummary);
}
}
+LocationSummary::LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified)
+ : LocationSummary(instruction,
+ call_kind,
+ intrinsified,
+ instruction->GetBlock()->GetGraph()->GetAllocator()) {}
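The delegation keeps a single initialization path: the public constructor derives the allocator from the instruction's graph, while the new private overload lets callers that already hold one skip the GetBlock()->GetGraph()->GetAllocator() chain. A generic sketch of the idiom with invented Widget/Registry/Allocator names:

struct Allocator {};
struct Registry {
  Allocator* GetAllocator() { return &allocator_; }
  Allocator allocator_;
};

class Widget {
 public:
  explicit Widget(Registry* registry)
      : Widget(registry, registry->GetAllocator()) {}  // derive, then delegate
 private:
  Widget(Registry* /* registry */, Allocator* /* allocator */) {}  // one real init path
};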
Location Location::RegisterOrConstant(HInstruction* instruction) {
return instruction->IsConstant()
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 6f0dbce..d56c151 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -665,6 +665,11 @@
}
private:
+ LocationSummary(HInstruction* instruction,
+ CallKind call_kind,
+ bool intrinsified,
+ ArenaAllocator* allocator);
+
ArenaVector<Location> inputs_;
ArenaVector<Location> temps_;
const CallKind call_kind_;
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7e37018..d87861b 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -28,6 +28,46 @@
namespace art {
+// TODO: Clean up the packed type detection so that we have the right type straight away
+// and do not need to go through this normalization.
+static inline void NormalizePackedType(/* inout */ DataType::Type* type,
+ /* inout */ bool* is_unsigned) {
+ switch (*type) {
+ case DataType::Type::kBool:
+ DCHECK(!*is_unsigned);
+ break;
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ if (*is_unsigned) {
+ *is_unsigned = false;
+ *type = DataType::Type::kUint8;
+ } else {
+ *type = DataType::Type::kInt8;
+ }
+ break;
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ if (*is_unsigned) {
+ *is_unsigned = false;
+ *type = DataType::Type::kUint16;
+ } else {
+ *type = DataType::Type::kInt16;
+ }
+ break;
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ // We do not have kUint32 and kUint64 at the moment.
+ break;
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ DCHECK(!*is_unsigned);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected type " << *type;
+ UNREACHABLE();
+ }
+}
+
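A quick illustration of the new helper's contract (the surrounding call site is assumed): it folds the separate is_unsigned flag into the packed type itself, so later stages see kUint8/kUint16 directly.

void Example() {
  DataType::Type type = DataType::Type::kInt8;
  bool is_unsigned = true;
  NormalizePackedType(&type, &is_unsigned);
  // Now type == DataType::Type::kUint8 and is_unsigned == false.
}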
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
@@ -74,22 +114,20 @@
// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64 = false);
+ /*out*/ HInstruction** operand);
-// Detect a sign extension in instruction from the given type. The to64 parameter
-// denotes if result is long, and thus sign extension from int can be included.
+// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64 = false) {
+ /*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by sign
// extension when represented in the *width* of the given narrower data type
- // (the fact that char normally zero extends does not matter here).
+ // (the fact that Uint16 normally zero extends does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
if (IsInt<8>(value)) {
*operand = instruction;
@@ -103,43 +141,39 @@
return true;
}
return false;
- case DataType::Type::kInt32:
- if (IsInt<32>(value)) {
- *operand = instruction;
- return to64;
- }
- return false;
default:
return false;
}
}
- // An implicit widening conversion of a signed integer to an integral type sign-extends
- // the two's-complement representation of the integer value to fill the wider format.
- if (instruction->GetType() == type && (instruction->IsArrayGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsInstanceFieldGet())) {
+ // An implicit widening conversion of any signed expression sign-extends.
+ if (instruction->GetType() == type) {
switch (type) {
case DataType::Type::kInt8:
case DataType::Type::kInt16:
*operand = instruction;
return true;
- case DataType::Type::kInt32:
- *operand = instruction;
- return to64;
default:
return false;
}
}
- // Explicit type conversions.
+ // An explicit widening conversion of a signed expression sign-extends.
if (instruction->IsTypeConversion()) {
- DataType::Type from = instruction->InputAt(0)->GetType();
+ HInstruction* conv = instruction->InputAt(0);
+ DataType::Type from = conv->GetType();
switch (instruction->GetType()) {
+ case DataType::Type::kInt32:
case DataType::Type::kInt64:
- return IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true);
+ if (type == from && (from == DataType::Type::kInt8 ||
+ from == DataType::Type::kInt16 ||
+ from == DataType::Type::kInt32)) {
+ *operand = conv;
+ return true;
+ }
+ return false;
case DataType::Type::kInt16:
return type == DataType::Type::kUint16 &&
from == DataType::Type::kUint16 &&
- IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, to64);
+ IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
default:
return false;
}
@@ -147,19 +181,18 @@
return false;
}
-// Detect a zero extension in instruction from the given type. The to64 parameter
-// denotes if result is long, and thus zero extension from int can be included.
+// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
DataType::Type type,
- /*out*/ HInstruction** operand,
- bool to64) {
+ /*out*/ HInstruction** operand) {
// Accept any already wider constant that would be handled properly by zero
// extension when represented in the *width* of the given narrower data type
- // (the fact that byte/short/int normally sign extend does not matter here).
+ // (the fact that Int8/Int16 normally sign extend does not matter here).
int64_t value = 0;
if (IsInt64AndGet(instruction, /*out*/ &value)) {
switch (type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
if (IsUint<8>(value)) {
*operand = instruction;
@@ -173,28 +206,26 @@
return true;
}
return false;
- case DataType::Type::kInt32:
- if (IsUint<32>(value)) {
- *operand = instruction;
- return to64;
- }
- return false;
default:
return false;
}
}
- // An implicit widening conversion of a char to an integral type zero-extends
- // the representation of the char value to fill the wider format.
- if (instruction->GetType() == type && (instruction->IsArrayGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsInstanceFieldGet())) {
- if (type == DataType::Type::kUint16) {
- *operand = instruction;
- return true;
+ // An implicit widening conversion of any unsigned expression zero-extends.
+ if (instruction->GetType() == type) {
+ switch (type) {
+ case DataType::Type::kUint8:
+ case DataType::Type::kUint16:
+ *operand = instruction;
+ return true;
+ default:
+ return false;
}
}
// A sign (or zero) extension followed by an explicit removal of just the
// higher sign bits is equivalent to a zero extension of the underlying operand.
+ //
+ // TODO: move this into simplifier and use new type system instead.
+ //
if (instruction->IsAnd()) {
int64_t mask = 0;
HInstruction* a = instruction->InputAt(0);
@@ -205,27 +236,32 @@
(IsInt64AndGet(b, /*out*/ &mask) && (IsSignExtensionAndGet(a, type, /*out*/ operand) ||
IsZeroExtensionAndGet(a, type, /*out*/ operand)))) {
switch ((*operand)->GetType()) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
return mask == std::numeric_limits<uint8_t>::max();
case DataType::Type::kUint16:
case DataType::Type::kInt16:
return mask == std::numeric_limits<uint16_t>::max();
- case DataType::Type::kInt32:
- return mask == std::numeric_limits<uint32_t>::max() && to64;
default: return false;
}
}
}
- // Explicit type conversions.
+ // An explicit widening conversion of an unsigned expression zero-extends.
if (instruction->IsTypeConversion()) {
- DataType::Type from = instruction->InputAt(0)->GetType();
+ HInstruction* conv = instruction->InputAt(0);
+ DataType::Type from = conv->GetType();
switch (instruction->GetType()) {
+ case DataType::Type::kInt32:
case DataType::Type::kInt64:
- return IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, /*to64*/ true);
+ if (type == from && from == DataType::Type::kUint16) {
+ *operand = conv;
+ return true;
+ }
+ return false;
case DataType::Type::kUint16:
return type == DataType::Type::kInt16 &&
from == DataType::Type::kInt16 &&
- IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand, to64);
+ IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
default:
return false;
}
@@ -268,51 +304,10 @@
// Compute relative vector length based on type difference.
static size_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, size_t vl) {
- switch (other_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8:
- switch (vector_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8: return vl;
- default: break;
- }
- return vl;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- switch (vector_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8: return vl >> 1;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16: return vl;
- default: break;
- }
- break;
- case DataType::Type::kInt32:
- switch (vector_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8: return vl >> 2;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16: return vl >> 1;
- case DataType::Type::kInt32: return vl;
- default: break;
- }
- break;
- case DataType::Type::kInt64:
- switch (vector_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8: return vl >> 3;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16: return vl >> 2;
- case DataType::Type::kInt32: return vl >> 1;
- case DataType::Type::kInt64: return vl;
- default: break;
- }
- break;
- default:
- break;
- }
- LOG(FATAL) << "Unsupported idiom conversion";
- UNREACHABLE();
+ DCHECK(DataType::IsIntegralType(other_type));
+ DCHECK(DataType::IsIntegralType(vector_type));
+ DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
+ return vl >> (DataType::SizeShift(other_type) - DataType::SizeShift(vector_type));
}
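As a quick check of the new closed form (a worked example, assuming DataType::SizeShift returns log2 of the type's byte size, i.e. 0/1/2/3 for 8/16/32/64-bit types):

    // other_type = kInt32 (shift 2), vector_type = kInt8 (shift 0), vl = 16:
    //   GetOtherVL(kInt32, kInt8, 16) == 16 >> (2 - 0) == 4
    // which matches the deleted table (kInt32 row, kInt8 column: vl >> 2).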
// Detect up to two instructions a and b, and an accumulated constant c.
@@ -360,6 +355,22 @@
return false;
}
+// Detect a + c for constant c.
+static bool IsAddConst(HInstruction* instruction,
+ /*out*/ HInstruction** a,
+ /*out*/ int64_t* c) {
+ if (instruction->IsAdd()) {
+ if (IsInt64AndGet(instruction->InputAt(0), c)) {
+ *a = instruction->InputAt(1);
+ return true;
+ } else if (IsInt64AndGet(instruction->InputAt(1), c)) {
+ *a = instruction->InputAt(0);
+ return true;
+ }
+ }
+ return false;
+}
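What the new helper matches, sketched (instruction names hypothetical); note the constant may appear on either side of the add:

    // Given HIR  i7 = Add(i5, IntConstant(3))  or  i7 = Add(IntConstant(3), i5):
    //   IsAddConst(i7, &a, &c)  ->  true, with a == i5 and c == 3.
    // The SAD recognizer below uses this to treat abs(a + c) as the
    // hidden subtraction abs(a - (-c)).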
+
// Detect reductions of the following forms,
// x = x_phi + ..
// x = x_phi - ..
@@ -418,7 +429,7 @@
// Check that instructions from the induction sets are fully removed: have no uses
// and no other instructions use them.
-static bool CheckInductionSetFullyRemoved(ArenaSet<HInstruction*>* iset) {
+static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
for (HInstruction* instr : *iset) {
if (instr->GetBlock() != nullptr ||
!instr->GetUses().empty() ||
@@ -442,7 +453,7 @@
compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
- global_allocator_(graph_->GetArena()),
+ global_allocator_(graph_->GetAllocator()),
top_loop_(nullptr),
last_loop_(nullptr),
iset_(nullptr),
@@ -454,7 +465,12 @@
vector_runtime_test_a_(nullptr),
vector_runtime_test_b_(nullptr),
vector_map_(nullptr),
- vector_permanent_map_(nullptr) {
+ vector_permanent_map_(nullptr),
+ vector_mode_(kSequential),
+ vector_preheader_(nullptr),
+ vector_header_(nullptr),
+ vector_body_(nullptr),
+ vector_index_(nullptr) {
}
void HLoopOptimization::Run() {
@@ -464,10 +480,8 @@
return;
}
- // Phase-local allocator that draws from the global pool. Since the allocator
- // itself resides on the stack, it is destructed on exiting Run(), which
- // implies its underlying memory is released immediately.
- ArenaAllocator allocator(global_allocator_->GetArenaPool());
+ // Phase-local allocator.
+ ScopedArenaAllocator allocator(graph_->GetArenaStack());
loop_allocator_ = &allocator;
// Perform loop optimizations.
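The pattern this hunk adopts, shown in isolation (a minimal sketch; SomePass is hypothetical, and GetArenaStack() is the accessor introduced later in this change):

    void SomePass::Run() {
      // The allocator lives on the stack, so all memory drawn from it is
      // released wholesale when Run() returns; no per-container cleanup.
      ScopedArenaAllocator allocator(graph_->GetArenaStack());
      ScopedArenaVector<HBasicBlock*> worklist(
          allocator.Adapter(kArenaAllocLoopOptimization));
      // ... fill and consume `worklist` ...
    }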
@@ -488,8 +502,8 @@
void HLoopOptimization::LocalRun() {
// Build the linear order using the phase-local allocator. This step enables building
// a loop hierarchy that properly reflects the outer-inner and previous-next relation.
- ArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
- LinearizeGraph(graph_, loop_allocator_, &linear_order);
+ ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
+ LinearizeGraph(graph_, &linear_order);
// Build the loop hierarchy.
for (HBasicBlock* block : linear_order) {
@@ -502,13 +516,13 @@
// temporary data structures using the phase-local allocator. All new HIR
// should use the global allocator.
if (top_loop_ != nullptr) {
- ArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> reds(
+ ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> map(
+ ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
- ArenaSafeMap<HInstruction*, HInstruction*> perm(
+ ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
// Attach.
iset_ = &iset;
@@ -1100,19 +1114,19 @@
return true;
} else if (instruction->IsArrayGet()) {
// Deal with vector restrictions.
- if (instruction->AsArrayGet()->IsStringCharAt() &&
- HasVectorRestrictions(restrictions, kNoStringCharAt)) {
+ bool is_string_char_at = instruction->AsArrayGet()->IsStringCharAt();
+ if (is_string_char_at && HasVectorRestrictions(restrictions, kNoStringCharAt)) {
return false;
}
// Accept a right-hand-side array base[index] for
- // (1) exact matching vector type,
+ // (1) matching vector type (exact match or signed/unsigned integral type of the same size),
// (2) loop-invariant base,
// (3) unit stride index,
// (4) vectorizable right-hand-side value.
HInstruction* base = instruction->InputAt(0);
HInstruction* index = instruction->InputAt(1);
HInstruction* offset = nullptr;
- if (type == instruction->GetType() &&
+ if (HVecOperation::ToSignedType(type) == HVecOperation::ToSignedType(instruction->GetType()) &&
node->loop_info->IsDefinedOutOfTheLoop(base) &&
induction_range_.IsUnitStride(instruction, index, graph_, &offset)) {
if (generate_code) {
@@ -1148,6 +1162,7 @@
size_t size_vec = DataType::Size(type);
size_t size_from = DataType::Size(from);
size_t size_to = DataType::Size(to);
+ DataType::Type ctype = size_from == size_vec ? from : type;
// Accept an integral conversion
// (1a) narrowing into vector type, "wider" operations cannot bring in higher order bits, or
// (1b) widening from at least vector type, and
@@ -1157,7 +1172,7 @@
VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
(size_to >= size_from &&
size_from >= size_vec &&
- VectorizeUse(node, opa, generate_code, type, restrictions))) {
+ VectorizeUse(node, opa, generate_code, ctype, restrictions))) {
if (generate_code) {
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, vector_map_->Get(opa)); // operand pass-through
@@ -1275,6 +1290,7 @@
}
if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
+ NormalizePackedType(&type, &is_unsigned);
GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
}
return true;
@@ -1334,6 +1350,7 @@
// ARM 32-bit always supports advanced SIMD (64-bit SIMD).
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
*restrictions |= kNoDiv | kNoReduction;
return TrySetVectorLength(8);
@@ -1353,6 +1370,7 @@
// ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
*restrictions |= kNoDiv;
return TrySetVectorLength(16);
@@ -1381,6 +1399,7 @@
if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
*restrictions |=
kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
@@ -1410,6 +1429,7 @@
if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
*restrictions |= kNoDiv | kNoReduction | kNoSAD;
return TrySetVectorLength(16);
@@ -1438,6 +1458,7 @@
if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
*restrictions |= kNoDiv | kNoReduction | kNoSAD;
return TrySetVectorLength(16);
@@ -1502,7 +1523,7 @@
new (global_allocator_) HTypeConversion(type, input, kNoDexPc));
}
vector = new (global_allocator_)
- HVecReplicateScalar(global_allocator_, input, type, vector_length_);
+ HVecReplicateScalar(global_allocator_, input, type, vector_length_, kNoDexPc);
vector_permanent_map_->Put(org, Insert(vector_preheader_, vector));
}
vector_map_->Put(org, vector);
@@ -1528,17 +1549,24 @@
HInstruction* opb,
HInstruction* offset,
DataType::Type type) {
+ uint32_t dex_pc = org->GetDexPc();
HInstruction* vector = nullptr;
if (vector_mode_ == kVector) {
// Vector store or load.
HInstruction* base = org->InputAt(0);
if (opb != nullptr) {
vector = new (global_allocator_) HVecStore(
- global_allocator_, base, opa, opb, type, vector_length_);
+ global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
} else {
bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
- vector = new (global_allocator_) HVecLoad(
- global_allocator_, base, opa, type, vector_length_, is_string_char_at);
+ vector = new (global_allocator_) HVecLoad(global_allocator_,
+ base,
+ opa,
+ type,
+ org->GetSideEffects(),
+ vector_length_,
+ is_string_char_at,
+ dex_pc);
}
// Known dynamically enforced alignment?
if (vector_peeling_candidate_ != nullptr &&
@@ -1550,11 +1578,12 @@
// Scalar store or load.
DCHECK(vector_mode_ == kSequential);
if (opb != nullptr) {
- vector = new (global_allocator_) HArraySet(org->InputAt(0), opa, opb, type, kNoDexPc);
+ vector = new (global_allocator_) HArraySet(
+ org->InputAt(0), opa, opb, type, org->GetSideEffects(), dex_pc);
} else {
bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
vector = new (global_allocator_) HArrayGet(
- org->InputAt(0), opa, type, kNoDexPc, is_string_char_at);
+ org->InputAt(0), opa, type, org->GetSideEffects(), dex_pc, is_string_char_at);
}
}
vector_map_->Put(org, vector);
@@ -1603,7 +1632,8 @@
&new_init,
type,
vector_length,
- 1));
+ 1,
+ kNoDexPc));
} else {
new_init = ReduceAndExtractIfNeeded(new_init);
}
@@ -1629,10 +1659,10 @@
// y = x_1
// along the exit of the defining loop.
HInstruction* reduce = new (global_allocator_) HVecReduce(
- global_allocator_, instruction, type, vector_length, kind);
+ global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
instruction = new (global_allocator_) HVecExtractScalar(
- global_allocator_, reduce, type, vector_length, 0);
+ global_allocator_, reduce, type, vector_length, 0, kNoDexPc);
exit->InsertInstructionAfter(instruction, reduce);
}
}
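In HIR terms, the exit-block rewrite this hunk performs (informal sketch; v is the vector accumulator):

    // Along the loop exit:
    //   r = HVecReduce(v, kind)         // fold all lanes into a single lane
    //   y = HVecExtractScalar(r, 0)     // move lane 0 into a scalar value
    // so scalar consumers of the reduction see a plain value of `type`.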
@@ -1653,69 +1683,70 @@
HInstruction* opb,
DataType::Type type,
bool is_unsigned) {
+ uint32_t dex_pc = org->GetDexPc();
HInstruction* vector = nullptr;
DataType::Type org_type = org->GetType();
switch (org->GetKind()) {
case HInstruction::kNeg:
DCHECK(opb == nullptr);
GENERATE_VEC(
- new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_),
- new (global_allocator_) HNeg(org_type, opa));
+ new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_, dex_pc),
+ new (global_allocator_) HNeg(org_type, opa, dex_pc));
case HInstruction::kNot:
DCHECK(opb == nullptr);
GENERATE_VEC(
- new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
- new (global_allocator_) HNot(org_type, opa));
+ new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
+ new (global_allocator_) HNot(org_type, opa, dex_pc));
case HInstruction::kBooleanNot:
DCHECK(opb == nullptr);
GENERATE_VEC(
- new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_),
- new (global_allocator_) HBooleanNot(opa));
+ new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
+ new (global_allocator_) HBooleanNot(opa, dex_pc));
case HInstruction::kTypeConversion:
DCHECK(opb == nullptr);
GENERATE_VEC(
- new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_),
- new (global_allocator_) HTypeConversion(org_type, opa, kNoDexPc));
+ new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_, dex_pc),
+ new (global_allocator_) HTypeConversion(org_type, opa, dex_pc));
case HInstruction::kAdd:
GENERATE_VEC(
- new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HAdd(org_type, opa, opb));
+ new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HAdd(org_type, opa, opb, dex_pc));
case HInstruction::kSub:
GENERATE_VEC(
- new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HSub(org_type, opa, opb));
+ new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HSub(org_type, opa, opb, dex_pc));
case HInstruction::kMul:
GENERATE_VEC(
- new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HMul(org_type, opa, opb));
+ new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HMul(org_type, opa, opb, dex_pc));
case HInstruction::kDiv:
GENERATE_VEC(
- new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HDiv(org_type, opa, opb, kNoDexPc));
+ new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HDiv(org_type, opa, opb, dex_pc));
case HInstruction::kAnd:
GENERATE_VEC(
- new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HAnd(org_type, opa, opb));
+ new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HAnd(org_type, opa, opb, dex_pc));
case HInstruction::kOr:
GENERATE_VEC(
- new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HOr(org_type, opa, opb));
+ new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HOr(org_type, opa, opb, dex_pc));
case HInstruction::kXor:
GENERATE_VEC(
- new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HXor(org_type, opa, opb));
+ new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HXor(org_type, opa, opb, dex_pc));
case HInstruction::kShl:
GENERATE_VEC(
- new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HShl(org_type, opa, opb));
+ new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HShl(org_type, opa, opb, dex_pc));
case HInstruction::kShr:
GENERATE_VEC(
- new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HShr(org_type, opa, opb));
+ new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HShr(org_type, opa, opb, dex_pc));
case HInstruction::kUShr:
GENERATE_VEC(
- new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_),
- new (global_allocator_) HUShr(org_type, opa, opb));
+ new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
+ new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
case HInstruction::kInvokeStaticOrDirect: {
HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
if (vector_mode_ == kVector) {
@@ -1725,22 +1756,25 @@
case Intrinsics::kMathAbsFloat:
case Intrinsics::kMathAbsDouble:
DCHECK(opb == nullptr);
- vector = new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_);
+ vector = new (global_allocator_)
+ HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc);
break;
case Intrinsics::kMathMinIntInt:
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
+ NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
+ HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
break;
}
case Intrinsics::kMathMaxIntInt:
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
+ NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned);
+ HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
break;
}
default:
@@ -1851,14 +1885,16 @@
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
if (vector_mode_ == kVector) {
+ NormalizePackedType(&type, &is_unsigned);
vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
global_allocator_,
vector_map_->Get(r),
vector_map_->Get(s),
type,
vector_length_,
+ is_rounded,
is_unsigned,
- is_rounded));
+ kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
@@ -1896,9 +1932,14 @@
(v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsInt ||
v->AsInvokeStaticOrDirect()->GetIntrinsic() == Intrinsics::kMathAbsLong)) {
HInstruction* x = v->InputAt(0);
- if (x->IsSub() && x->GetType() == reduction_type) {
- a = x->InputAt(0);
- b = x->InputAt(1);
+ if (x->GetType() == reduction_type) {
+ int64_t c = 0;
+ if (x->IsSub()) {
+ a = x->InputAt(0);
+ b = x->InputAt(1);
+ } else if (IsAddConst(x, /*out*/ &a, /*out*/ &c)) {
+ b = graph_->GetConstant(reduction_type, -c); // hidden SUB!
+ }
}
}
if (a == nullptr || b == nullptr) {
@@ -1906,22 +1947,21 @@
}
// Accept same-type or consistent sign extension for narrower-type on operands a and b.
// The same-type or narrower operands are called r (a or lower) and s (b or lower).
+ // We inspect the operands carefully to pick the most suitable type.
HInstruction* r = a;
HInstruction* s = b;
bool is_unsigned = false;
DataType::Type sub_type = a->GetType();
- if (a->IsTypeConversion()) {
- HInstruction* hunt = a;
- while (hunt->IsTypeConversion()) {
- hunt = hunt->InputAt(0);
- }
- sub_type = hunt->GetType();
- } else if (b->IsTypeConversion()) {
- HInstruction* hunt = a;
- while (hunt->IsTypeConversion()) {
- hunt = hunt->InputAt(0);
- }
- sub_type = hunt->GetType();
+ if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
+ sub_type = b->GetType();
+ }
+ if (a->IsTypeConversion() &&
+ DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
+ sub_type = a->InputAt(0)->GetType();
+ }
+ if (b->IsTypeConversion() &&
+ DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
+ sub_type = b->InputAt(0)->GetType();
}
if (reduction_type != sub_type &&
(!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
@@ -1942,6 +1982,7 @@
VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
if (generate_code) {
+ NormalizePackedType(&reduction_type, &is_unsigned);
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
global_allocator_,
@@ -1949,7 +1990,8 @@
vector_map_->Get(r),
vector_map_->Get(s),
reduction_type,
- GetOtherVL(reduction_type, sub_type, vector_length_)));
+ GetOtherVL(reduction_type, sub_type, vector_length_),
+ kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
GenerateVecOp(v, vector_map_->Get(r), nullptr, reduction_type);
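For orientation, the scalar reduction this idiom recognizer matches, including the shape added by this change (a sketch; a, b, n, and c are hypothetical):

    int32_t s = 0;
    for (size_t i = 0; i < n; ++i) {
      s += std::abs(a[i] - b[i]);  // matched before this change
      // newly matched as well: s += std::abs(a[i] + c) for constant c,
      // rewritten internally as a[i] - (-c) (the "hidden SUB" above)
    }

Both forms vectorize into HVecSADAccumulate when the operand types allow it.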
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 6e6e387..b1b3d11 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_LOOP_OPTIMIZATION_H_
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "induction_var_range.h"
#include "nodes.h"
#include "optimization.h"
@@ -220,7 +222,7 @@
// Phase-local heap memory allocator for the loop optimizer. Storage obtained
// through this allocator is immediately released when the loop optimizer is done.
- ArenaAllocator* loop_allocator_;
+ ScopedArenaAllocator* loop_allocator_;
// Global heap memory allocator. Used to build HIR.
ArenaAllocator* global_allocator_;
@@ -232,14 +234,14 @@
// Temporary bookkeeping of a set of instructions.
// Contents reside in phase-local heap memory.
- ArenaSet<HInstruction*>* iset_;
+ ScopedArenaSet<HInstruction*>* iset_;
// Temporary bookkeeping of reduction instructions. Mapping is two-fold:
// (1) reductions in the loop-body are mapped back to their phi definition,
// (2) phi definitions are mapped to their initial value (updated during
// code generation to feed the proper values into the new chain).
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* reductions_;
// Flag that tracks if any simplifications have occurred.
bool simplified_;
@@ -249,7 +251,7 @@
// Set of array references in the vector loop.
// Contents reside in phase-local heap memory.
- ArenaSet<ArrayReference>* vector_refs_;
+ ScopedArenaSet<ArrayReference>* vector_refs_;
// Dynamic loop peeling candidate for alignment.
const ArrayReference* vector_peeling_candidate_;
@@ -262,11 +264,11 @@
// loop (mode is kSequential) and the actual vector loop (mode is kVector). The data
// structure maps original instructions into the new instructions.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_map_;
// Permanent mapping used during vectorization synthesis.
// Contents reside in phase-local heap memory.
- ArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
+ ScopedArenaSafeMap<HInstruction*, HInstruction*>* vector_permanent_map_;
// Temporary vectorization bookkeeping.
VectorMode vector_mode_; // synthesis mode
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 95718ae..4e1857d 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -24,14 +24,12 @@
* constructing the loop hierarchy. Actual optimizations are tested
* through the checker tests.
*/
-class LoopOptimizationTest : public CommonCompilerTest {
+class LoopOptimizationTest : public OptimizingUnitTest {
public:
LoopOptimizationTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
- iva_(new (&allocator_) HInductionVarAnalysis(graph_)),
- loop_opt_(new (&allocator_) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
+ : graph_(CreateGraph()),
+ iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
+ loop_opt_(new (GetAllocator()) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
BuildGraph();
}
@@ -40,38 +38,38 @@
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- return_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ return_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(return_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
entry_block_->AddInstruction(parameter_);
- return_block_->AddInstruction(new (&allocator_) HReturnVoid());
- exit_block_->AddInstruction(new (&allocator_) HExit());
+ return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
entry_block_->AddSuccessor(return_block_);
return_block_->AddSuccessor(exit_block_);
}
/** Adds a loop nest at given position before successor. */
HBasicBlock* AddLoop(HBasicBlock* position, HBasicBlock* successor) {
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
// Control flow.
position->ReplaceSuccessor(successor, header);
header->AddSuccessor(body);
header->AddSuccessor(successor);
- header->AddInstruction(new (&allocator_) HIf(parameter_));
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
body->AddSuccessor(header);
- body->AddInstruction(new (&allocator_) HGoto());
+ body->AddInstruction(new (GetAllocator()) HGoto());
return header;
}
@@ -80,7 +78,8 @@
graph_->BuildDominatorTree();
iva_->Run();
// Do not release the loop hierarchy.
- loop_opt_->loop_allocator_ = &allocator_;
+ ScopedArenaAllocator loop_allocator(GetArenaStack());
+ loop_opt_->loop_allocator_ = &loop_allocator;
loop_opt_->LocalRun();
}
@@ -101,8 +100,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HInductionVarAnalysis* iva_;
HLoopOptimization* loop_opt_;
@@ -199,8 +196,8 @@
// predecessors.
TEST_F(LoopOptimizationTest, SimplifyLoop) {
// Can't use AddLoop as we want special order for blocks predecessors.
- HBasicBlock* header = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* body = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(header);
graph_->AddBlock(body);
@@ -213,11 +210,11 @@
DCHECK(header->GetSuccessors()[1] == return_block_);
// Data flow.
- header->AddInstruction(new (&allocator_) HIf(parameter_));
- body->AddInstruction(new (&allocator_) HGoto());
+ header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+ body->AddInstruction(new (GetAllocator()) HGoto());
- HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, DataType::Type::kInt32);
- HInstruction* add = new (&allocator_) HAdd(DataType::Type::kInt32, phi, parameter_);
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, parameter_);
header->AddPhi(phi);
body->AddInstruction(add);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 41ea998..1a537ca 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -56,13 +56,13 @@
DCHECK_EQ(visited->GetHighestBitSet(), -1);
// Nodes that we're currently visiting, indexed by block id.
- ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visiting(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Stack of nodes that we're currently visiting (same as marked in "visiting" above).
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
visited->SetBit(entry_block_->GetBlockId());
@@ -173,7 +173,7 @@
}
GraphAnalysisResult HGraph::BuildDominatorTree() {
- ArenaBitVector visited(arena_, blocks_.size(), false, kArenaAllocGraphBuilder);
+ ArenaBitVector visited(allocator_, blocks_.size(), false, kArenaAllocGraphBuilder);
// (1) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -259,13 +259,13 @@
reverse_post_order_.push_back(entry_block_);
// Number of visits of a given node, indexed by block id.
- ArenaVector<size_t> visits(blocks_.size(), 0u, arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<size_t> visits(blocks_.size(), 0u, allocator_->Adapter(kArenaAllocGraphBuilder));
// Number of successors visited from a given node, indexed by block id.
ArenaVector<size_t> successors_visited(blocks_.size(),
0u,
- arena_->Adapter(kArenaAllocGraphBuilder));
+ allocator_->Adapter(kArenaAllocGraphBuilder));
// Nodes for which we need to visit successors.
- ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HBasicBlock*> worklist(allocator_->Adapter(kArenaAllocGraphBuilder));
constexpr size_t kDefaultWorklistSize = 8;
worklist.reserve(kDefaultWorklistSize);
worklist.push_back(entry_block_);
@@ -335,7 +335,7 @@
}
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
- HBasicBlock* new_block = new (arena_) HBasicBlock(this, successor->GetDexPc());
+ HBasicBlock* new_block = new (allocator_) HBasicBlock(this, successor->GetDexPc());
AddBlock(new_block);
// Use `InsertBetween` to ensure the predecessor index and successor index of
// `block` and `successor` are preserved.
@@ -347,7 +347,7 @@
// Insert a new node between `block` and `successor` to split the
// critical edge.
HBasicBlock* new_block = SplitEdge(block, successor);
- new_block->AddInstruction(new (arena_) HGoto(successor->GetDexPc()));
+ new_block->AddInstruction(new (allocator_) HGoto(successor->GetDexPc()));
if (successor->IsLoopHeader()) {
// If we split at a back edge boundary, make the new block the back edge.
HLoopInformation* info = successor->GetLoopInformation();
@@ -396,9 +396,9 @@
// this graph.
size_t number_of_incomings = header->GetPredecessors().size() - info->NumberOfBackEdges();
if (number_of_incomings != 1 || (GetEntryBlock()->GetSingleSuccessor() == header)) {
- HBasicBlock* pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(pre_header);
- pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
+ pre_header->AddInstruction(new (allocator_) HGoto(header->GetDexPc()));
for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors()[pred];
@@ -440,7 +440,7 @@
try_entry != &block->GetTryCatchInformation()->GetTryEntry())) {
// We are either setting try block membership for the first time or it
// has changed.
- block->SetTryCatchInformation(new (arena_) TryCatchInformation(*try_entry));
+ block->SetTryCatchInformation(new (allocator_) TryCatchInformation(*try_entry));
}
}
}
@@ -547,7 +547,7 @@
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
- cached_null_constant_ = new (arena_) HNullConstant(dex_pc);
+ cached_null_constant_ = new (allocator_) HNullConstant(dex_pc);
cached_null_constant_->SetReferenceTypeInfo(inexact_object_rti_);
InsertConstant(cached_null_constant_);
}
@@ -563,7 +563,7 @@
// not null and not in a block. Otherwise, we need to clear the instruction
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_current_method_ == nullptr) || (cached_current_method_->GetBlock() == nullptr)) {
- cached_current_method_ = new (arena_) HCurrentMethod(
+ cached_current_method_ = new (allocator_) HCurrentMethod(
Is64BitInstructionSet(instruction_set_) ? DataType::Type::kInt64 : DataType::Type::kInt32,
entry_block_->GetDexPc());
if (entry_block_->GetFirstInstruction() == nullptr) {
@@ -590,6 +590,7 @@
case DataType::Type::kBool:
DCHECK(IsUint<1>(value));
FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -709,7 +710,7 @@
bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader();
if (is_irreducible_loop) {
- ArenaBitVector visited(graph->GetArena(),
+ ArenaBitVector visited(graph->GetAllocator(),
graph->GetBlocks().size(),
/* expandable */ false,
kArenaAllocGraphBuilder);
@@ -1654,8 +1655,8 @@
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1667,7 +1668,7 @@
}
new_block->instructions_.SetBlockOfInstructions(new_block);
- AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc()));
+ AddInstruction(new (GetGraph()->GetAllocator()) HGoto(new_block->GetDexPc()));
for (HBasicBlock* successor : GetSuccessors()) {
successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block;
@@ -1684,7 +1685,7 @@
DCHECK(!graph_->IsInSsaForm()) << "Support for SSA form not implemented.";
DCHECK(!IsCatchBlock()) << "Support for updating try/catch information not implemented.";
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
for (HBasicBlock* predecessor : GetPredecessors()) {
predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block;
@@ -1700,8 +1701,8 @@
HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) {
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(),
- cursor->GetDexPc());
+ HBasicBlock* new_block =
+ new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), cursor->GetDexPc());
new_block->instructions_.first_instruction_ = cursor;
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
instructions_.last_instruction_ = cursor->previous_;
@@ -1733,7 +1734,7 @@
DCHECK_NE(instructions_.last_instruction_, cursor);
DCHECK_EQ(cursor->GetBlock(), this);
- HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc());
+ HBasicBlock* new_block = new (GetGraph()->GetAllocator()) HBasicBlock(GetGraph(), GetDexPc());
new_block->instructions_.first_instruction_ = cursor->GetNext();
new_block->instructions_.last_instruction_ = instructions_.last_instruction_;
cursor->next_->previous_ = nullptr;
@@ -2029,7 +2030,7 @@
last_instruction->IsPackedSwitch() ||
(last_instruction->IsTryBoundary() && IsCatchBlock()));
predecessor->RemoveInstruction(last_instruction);
- predecessor->AddInstruction(new (graph_->GetArena()) HGoto(last_instruction->GetDexPc()));
+ predecessor->AddInstruction(new (graph_->GetAllocator()) HGoto(last_instruction->GetDexPc()));
} else if (num_pred_successors == 0u) {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
@@ -2240,7 +2241,7 @@
if (current->NeedsEnvironment()) {
DCHECK(current->HasEnvironment());
current->GetEnvironment()->SetAndCopyParentChain(
- outer_graph->GetArena(), invoke->GetEnvironment());
+ outer_graph->GetAllocator(), invoke->GetEnvironment());
}
}
}
@@ -2293,7 +2294,7 @@
// into two blocks, merge the first block of the inlined graph into
// the first half, and replace the exit block of the inlined graph
// with the second half.
- ArenaAllocator* allocator = outer_graph->GetArena();
+ ArenaAllocator* allocator = outer_graph->GetAllocator();
HBasicBlock* at = invoke->GetBlock();
// Note that we split before the invoke only to simplify polymorphic inlining.
HBasicBlock* to = at->SplitBeforeForInlining(invoke);
@@ -2477,10 +2478,10 @@
HBasicBlock* old_pre_header = header->GetDominator();
// Need extra block to avoid critical edge.
- HBasicBlock* if_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* true_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* false_block = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* if_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* true_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* false_block = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(if_block);
AddBlock(true_block);
AddBlock(false_block);
@@ -2535,9 +2536,9 @@
HLoopInformation* loop = header->GetLoopInformation();
// Add new loop blocks.
- HBasicBlock* new_pre_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_header = new (arena_) HBasicBlock(this, header->GetDexPc());
- HBasicBlock* new_body = new (arena_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_pre_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_header = new (allocator_) HBasicBlock(this, header->GetDexPc());
+ HBasicBlock* new_body = new (allocator_) HBasicBlock(this, header->GetDexPc());
AddBlock(new_pre_header);
AddBlock(new_header);
AddBlock(new_body);
@@ -2569,10 +2570,10 @@
reverse_post_order_[index_of_body] = new_body;
// Add gotos and suspend check (client must add conditional in header).
- new_pre_header->AddInstruction(new (arena_) HGoto());
- HSuspendCheck* suspend_check = new (arena_) HSuspendCheck(header->GetDexPc());
+ new_pre_header->AddInstruction(new (allocator_) HGoto());
+ HSuspendCheck* suspend_check = new (allocator_) HSuspendCheck(header->GetDexPc());
new_header->AddInstruction(suspend_check);
- new_body->AddInstruction(new (arena_) HGoto());
+ new_body->AddInstruction(new (allocator_) HGoto());
suspend_check->CopyEnvironmentFromWithLoopPhiAdjustment(
loop->GetSuspendCheck()->GetEnvironment(), header);
@@ -2890,7 +2891,7 @@
// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
- ArenaAllocator* allocator = GetArena();
+ ArenaAllocator* allocator = GetAllocator();
if (cond->IsCondition() &&
!DataType::IsFloatingPointType(cond->InputAt(0)->GetType())) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c49cee3..99fde75 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -45,6 +45,7 @@
namespace art {
+class ArenaStack;
class GraphChecker;
class HBasicBlock;
class HConstructorFence;
@@ -305,7 +306,8 @@
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
public:
- HGraph(ArenaAllocator* arena,
+ HGraph(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
const DexFile& dex_file,
uint32_t method_idx,
InstructionSet instruction_set,
@@ -313,10 +315,11 @@
bool debuggable = false,
bool osr = false,
int start_instruction_id = 0)
- : arena_(arena),
- blocks_(arena->Adapter(kArenaAllocBlockList)),
- reverse_post_order_(arena->Adapter(kArenaAllocReversePostOrder)),
- linear_order_(arena->Adapter(kArenaAllocLinearOrder)),
+ : allocator_(allocator),
+ arena_stack_(arena_stack),
+ blocks_(allocator->Adapter(kArenaAllocBlockList)),
+ reverse_post_order_(allocator->Adapter(kArenaAllocReversePostOrder)),
+ linear_order_(allocator->Adapter(kArenaAllocLinearOrder)),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -337,22 +340,23 @@
number_of_cha_guards_(0),
instruction_set_(instruction_set),
cached_null_constant_(nullptr),
- cached_int_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_int_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_float_constants_(std::less<int32_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_long_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
+ cached_double_constants_(std::less<int64_t>(), allocator->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
- cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
+ cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
// Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
void InitializeInexactObjectRTI(VariableSizedHandleScope* handles);
- ArenaAllocator* GetArena() const { return arena_; }
+ ArenaAllocator* GetAllocator() const { return allocator_; }
+ ArenaStack* GetArenaStack() const { return arena_stack_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
bool IsInSsaForm() const { return in_ssa_form_; }
@@ -613,7 +617,7 @@
// If not found or previously deleted, create and cache a new instruction.
// Don't bother reviving a previously deleted instruction, for simplicity.
if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (arena_) InstructionType(value, dex_pc);
+ constant = new (allocator_) InstructionType(value, dex_pc);
cache->Overwrite(value, constant);
InsertConstant(constant);
}
@@ -629,7 +633,8 @@
// See CacheFloatConstant comment.
void CacheDoubleConstant(HDoubleConstant* constant);
- ArenaAllocator* const arena_;
+ ArenaAllocator* const allocator_;
+ ArenaStack* const arena_stack_;
// List of blocks in insertion order.
ArenaVector<HBasicBlock*> blocks_;
@@ -751,9 +756,12 @@
suspend_check_(nullptr),
irreducible_(false),
contains_irreducible_loop_(false),
- back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
+ back_edges_(graph->GetAllocator()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
- blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) {
+ blocks_(graph->GetAllocator(),
+ graph->GetBlocks().size(),
+ true,
+ kArenaAllocLoopInfoBackEdges) {
back_edges_.reserve(kDefaultNumberOfBackEdges);
}
@@ -916,11 +924,11 @@
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
- predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)),
- successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)),
+ predecessors_(graph->GetAllocator()->Adapter(kArenaAllocPredecessors)),
+ successors_(graph->GetAllocator()->Adapter(kArenaAllocSuccessors)),
loop_information_(nullptr),
dominator_(nullptr),
- dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)),
+ dominated_blocks_(graph->GetAllocator()->Adapter(kArenaAllocDominated)),
block_id_(kInvalidBlockId),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
@@ -972,7 +980,7 @@
void AddBackEdge(HBasicBlock* back_edge) {
if (loop_information_ == nullptr) {
- loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
+ loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
}
DCHECK_EQ(loop_information_->GetHeader(), this);
loop_information_->AddBackEdge(back_edge);
@@ -1423,7 +1431,8 @@
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
M(MipsComputeBaseMethodAddress, Instruction) \
- M(MipsPackedSwitch, Instruction)
+ M(MipsPackedSwitch, Instruction) \
+ M(IntermediateArrayAddressIndex, Instruction)
#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
@@ -1760,14 +1769,26 @@
static constexpr uint64_t kAllReads =
((1ULL << (kLastBitForReads + 1 - kFieldReadOffset)) - 1) << kFieldReadOffset;
- // Translates type to bit flag.
+ // Translates type to bit flag. The type must correspond to a Java type.
static uint64_t TypeFlag(DataType::Type type, int offset) {
- CHECK_NE(type, DataType::Type::kVoid);
- const uint64_t one = 1;
- const int shift = static_cast<int>(type); // 0-based consecutive enum
+ int shift;
+ switch (type) {
+ case DataType::Type::kReference: shift = 0; break;
+ case DataType::Type::kBool: shift = 1; break;
+ case DataType::Type::kInt8: shift = 2; break;
+ case DataType::Type::kUint16: shift = 3; break;
+ case DataType::Type::kInt16: shift = 4; break;
+ case DataType::Type::kInt32: shift = 5; break;
+ case DataType::Type::kInt64: shift = 6; break;
+ case DataType::Type::kFloat32: shift = 7; break;
+ case DataType::Type::kFloat64: shift = 8; break;
+ default:
+ LOG(FATAL) << "Unexpected data type " << type;
+ UNREACHABLE();
+ }
DCHECK_LE(kFieldWriteOffset, shift);
DCHECK_LT(shift, kArrayWriteOffset);
- return one << (shift + offset);
+ return UINT64_C(1) << (shift + offset);
}
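A worked instance of the new mapping (illustrative only; the offset constants are defined elsewhere in this class):

    // TypeFlag(DataType::Type::kInt32, kFieldWriteOffset)
    //   -> shift == 5, so the result is UINT64_C(1) << (5 + kFieldWriteOffset).
    // Unlike the old static_cast of the enum value, the explicit table keeps
    // the bit layout stable when enumerators such as kUint8 are inserted.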
// Private constructor on direct flags value.
@@ -1779,21 +1800,23 @@
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
public:
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
size_t number_of_vregs,
ArtMethod* method,
uint32_t dex_pc,
HInstruction* holder)
- : vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
- locations_(arena->Adapter(kArenaAllocEnvironmentLocations)),
+ : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
+ locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
method_(method),
dex_pc_(dex_pc),
holder_(holder) {
}
- ALWAYS_INLINE HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
- : HEnvironment(arena,
+ ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
+ const HEnvironment& to_copy,
+ HInstruction* holder)
+ : HEnvironment(allocator,
to_copy.Size(),
to_copy.GetMethod(),
to_copy.GetDexPc(),
@@ -1912,7 +1935,7 @@
HInstruction* GetPreviousDisregardingMoves() const;
HBasicBlock* GetBlock() const { return block_; }
- ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); }
+ ArenaAllocator* GetAllocator() const { return block_->GetGraph()->GetAllocator(); }
void SetBlock(HBasicBlock* block) { block_ = block; }
bool IsInBlock() const { return block_ != nullptr; }
bool IsInLoop() const { return block_->IsInLoop(); }
@@ -2002,7 +2025,7 @@
// Note: fixup_end remains valid across push_front().
auto fixup_end = uses_.empty() ? uses_.begin() : ++uses_.begin();
HUseListNode<HInstruction*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HInstruction*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HInstruction*>(user, index);
uses_.push_front(*new_node);
FixUpUserRecordsAfterUseInsertion(fixup_end);
}
@@ -2012,7 +2035,7 @@
// Note: env_fixup_end remains valid across push_front().
auto env_fixup_end = env_uses_.empty() ? env_uses_.begin() : ++env_uses_.begin();
HUseListNode<HEnvironment*>* new_node =
- new (GetBlock()->GetGraph()->GetArena()) HUseListNode<HEnvironment*>(user, index);
+ new (GetBlock()->GetGraph()->GetAllocator()) HUseListNode<HEnvironment*>(user, index);
env_uses_.push_front(*new_node);
FixUpUserRecordsAfterEnvUseInsertion(env_fixup_end);
}
@@ -2095,7 +2118,7 @@
// copying, the uses lists are being updated.
void CopyEnvironmentFrom(HEnvironment* environment) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFrom(environment);
if (environment->GetParent() != nullptr) {
@@ -2106,7 +2129,7 @@
void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment,
HBasicBlock* block) {
DCHECK(environment_ == nullptr);
- ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetAllocator();
environment_ = new (allocator) HEnvironment(allocator, *environment, this);
environment_->CopyFromWithLoopPhiAdjustment(environment, block);
if (environment->GetParent() != nullptr) {
@@ -2454,11 +2477,11 @@
protected:
HVariableInputSizeInstruction(SideEffects side_effects,
uint32_t dex_pc,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
size_t number_of_inputs,
ArenaAllocKind kind)
: HInstruction(side_effects, dex_pc),
- inputs_(number_of_inputs, arena->Adapter(kind)) {}
+ inputs_(number_of_inputs, allocator->Adapter(kind)) {}
ArenaVector<HUserRecord<HInstruction*>> inputs_;
@@ -2559,7 +2582,7 @@
class HPhi FINAL : public HVariableInputSizeInstruction {
public:
- HPhi(ArenaAllocator* arena,
+ HPhi(ArenaAllocator* allocator,
uint32_t reg_number,
size_t number_of_inputs,
DataType::Type type,
@@ -2567,7 +2590,7 @@
: HVariableInputSizeInstruction(
SideEffects::None(),
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocPhiInputs),
reg_number_(reg_number) {
@@ -3006,11 +3029,14 @@
public:
// Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
// across.
- HDeoptimize(ArenaAllocator* arena, HInstruction* cond, DeoptimizationKind kind, uint32_t dex_pc)
+ HDeoptimize(ArenaAllocator* allocator,
+ HInstruction* cond,
+ DeoptimizationKind kind,
+ uint32_t dex_pc)
: HVariableInputSizeInstruction(
SideEffects::All(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
@@ -3023,7 +3049,7 @@
// instead of `guard`.
// We set CanTriggerGC to prevent any intermediate address to be live
// at the point of the `HDeoptimize`.
- HDeoptimize(ArenaAllocator* arena,
+ HDeoptimize(ArenaAllocator* allocator,
HInstruction* cond,
HInstruction* guard,
DeoptimizationKind kind,
@@ -3031,7 +3057,7 @@
: HVariableInputSizeInstruction(
SideEffects::CanTriggerGC(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
@@ -3095,8 +3121,8 @@
public:
// CHA guards are only optimized in a separate pass and it has no side effects
// with regard to other passes.
- HShouldDeoptimizeFlag(ArenaAllocator* arena, uint32_t dex_pc)
- : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, arena, 0, kArenaAllocCHA) {
+ HShouldDeoptimizeFlag(ArenaAllocator* allocator, uint32_t dex_pc)
+ : HVariableInputSizeInstruction(SideEffects::None(), dex_pc, allocator, 0, kArenaAllocCHA) {
}
DataType::Type GetType() const OVERRIDE { return DataType::Type::kInt32; }
@@ -4063,7 +4089,7 @@
using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>;
- HInvoke(ArenaAllocator* arena,
+ HInvoke(ArenaAllocator* allocator,
uint32_t number_of_arguments,
uint32_t number_of_other_inputs,
DataType::Type return_type,
@@ -4074,7 +4100,7 @@
: HVariableInputSizeInstruction(
SideEffects::AllExceptGCDependency(), // Assume write/read on all fields/arrays.
dex_pc,
- arena,
+ allocator,
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
@@ -4101,13 +4127,13 @@
class HInvokeUnresolved FINAL : public HInvoke {
public:
- HInvokeUnresolved(ArenaAllocator* arena,
+ HInvokeUnresolved(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
InvokeType invoke_type)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4125,12 +4151,12 @@
class HInvokePolymorphic FINAL : public HInvoke {
public:
- HInvokePolymorphic(ArenaAllocator* arena,
+ HInvokePolymorphic(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u /* number_of_other_inputs */,
return_type,
@@ -4202,7 +4228,7 @@
uint64_t method_load_data;
};
- HInvokeStaticOrDirect(ArenaAllocator* arena,
+ HInvokeStaticOrDirect(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
@@ -4212,7 +4238,7 @@
InvokeType invoke_type,
MethodReference target_method,
ClinitCheckRequirement clinit_check_requirement)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
// potentially one other if the clinit check is explicit, and potentially
@@ -4397,14 +4423,14 @@
class HInvokeVirtual FINAL : public HInvoke {
public:
- HInvokeVirtual(ArenaAllocator* arena,
+ HInvokeVirtual(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t vtable_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -4445,14 +4471,14 @@
class HInvokeInterface FINAL : public HInvoke {
public:
- HInvokeInterface(ArenaAllocator* arena,
+ HInvokeInterface(ArenaAllocator* allocator,
uint32_t number_of_arguments,
DataType::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
ArtMethod* resolved_method,
uint32_t imt_index)
- : HInvoke(arena,
+ : HInvoke(allocator,
number_of_arguments,
0u,
return_type,
@@ -5185,7 +5211,7 @@
class HTypeConversion FINAL : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
- HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc)
+ HTypeConversion(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(result_type, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
// Invariant: We should never generate a conversion to a Boolean value.
@@ -5382,9 +5408,21 @@
HArrayGet(HInstruction* array,
HInstruction* index,
DataType::Type type,
+ uint32_t dex_pc)
+ : HArrayGet(array,
+ index,
+ type,
+ SideEffects::ArrayReadOfType(type),
+ dex_pc,
+ /* is_string_char_at */ false) {}
+
+ HArrayGet(HInstruction* array,
+ HInstruction* index,
+ DataType::Type type,
+ SideEffects side_effects,
uint32_t dex_pc,
- bool is_string_char_at = false)
- : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+ bool is_string_char_at)
+ : HExpression(type, side_effects, dex_pc) {
SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);
SetRawInputAt(0, array);
SetRawInputAt(1, index);
@@ -5453,7 +5491,21 @@
HInstruction* value,
DataType::Type expected_component_type,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None(), dex_pc) {
+ : HArraySet(array,
+ index,
+ value,
+ expected_component_type,
+ // Make a best guess for side effects now, may be refined during SSA building.
+ ComputeSideEffects(GetComponentType(value->GetType(), expected_component_type)),
+ dex_pc) {}
+
+ HArraySet(HInstruction* array,
+ HInstruction* index,
+ HInstruction* value,
+ DataType::Type expected_component_type,
+ SideEffects side_effects,
+ uint32_t dex_pc)
+ : HTemplateInstruction(side_effects, dex_pc) {
SetPackedField<ExpectedComponentTypeField>(expected_component_type);
SetPackedFlag<kFlagNeedsTypeCheck>(value->GetType() == DataType::Type::kReference);
SetPackedFlag<kFlagValueCanBeNull>(true);
@@ -5461,8 +5513,6 @@
SetRawInputAt(0, array);
SetRawInputAt(1, index);
SetRawInputAt(2, value);
- // Make a best guess now, may be refined during SSA building.
- ComputeSideEffects();
}
bool NeedsEnvironment() const OVERRIDE {
@@ -5501,24 +5551,26 @@
HInstruction* GetValue() const { return InputAt(2); }
DataType::Type GetComponentType() const {
+ return GetComponentType(GetValue()->GetType(), GetRawExpectedComponentType());
+ }
+
+ static DataType::Type GetComponentType(DataType::Type value_type,
+ DataType::Type expected_component_type) {
// The Dex format does not type floating point index operations. Since the
- // `expected_component_type_` is set during building and can therefore not
+ // `expected_component_type` comes from SSA building and can therefore not
   // be correct, we also check the value type. If it is a floating
// point type, we must use that type.
- DataType::Type value_type = GetValue()->GetType();
return ((value_type == DataType::Type::kFloat32) || (value_type == DataType::Type::kFloat64))
? value_type
- : GetRawExpectedComponentType();
+ : expected_component_type;
}
DataType::Type GetRawExpectedComponentType() const {
return GetPackedField<ExpectedComponentTypeField>();
}
- void ComputeSideEffects() {
- DataType::Type type = GetComponentType();
- SetSideEffects(SideEffects::ArrayWriteOfType(type).Union(
- SideEffectsForArchRuntimeCalls(type)));
+ static SideEffects ComputeSideEffects(DataType::Type type) {
+ return SideEffects::ArrayWriteOfType(type).Union(SideEffectsForArchRuntimeCalls(type));
}
static SideEffects SideEffectsForArchRuntimeCalls(DataType::Type value_type) {
@@ -6598,7 +6650,7 @@
// about the associated object.
HConstructorFence(HInstruction* fence_object,
uint32_t dex_pc,
- ArenaAllocator* arena)
+ ArenaAllocator* allocator)
// We strongly suspect there is not a more accurate way to describe the fine-grained reordering
// constraints described in the class header. We claim that these SideEffects constraints
// enforce a superset of the real constraints.
@@ -6622,7 +6674,7 @@
// we can refine the side effect to a smaller set of type reads (see above constraints).
: HVariableInputSizeInstruction(SideEffects::AllReads(),
dex_pc,
- arena,
+ allocator,
/* number_of_inputs */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
@@ -6839,9 +6891,9 @@
class HParallelMove FINAL : public HTemplateInstruction<0> {
public:
- explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc)
+ explicit HParallelMove(ArenaAllocator* allocator, uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::None(), dex_pc),
- moves_(arena->Adapter(kArenaAllocMoveOperands)) {
+ moves_(allocator->Adapter(kArenaAllocMoveOperands)) {
moves_.reserve(kDefaultNumberOfMoves);
}
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index 80e652e..ef388c3 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -69,6 +69,46 @@
DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch);
};
+// This instruction computes part of the array access offset (index offset).
+//
+// For array accesses the element address has the following structure:
+// Address = CONST_OFFSET + base_addr + index << ELEM_SHIFT. The address part
+// (index << ELEM_SHIFT) can be shared across array accesses with
+// the same data type and index. For example, in the following loop, five accesses
+// can share the address computation:
+//
+// void foo(int[] a, int[] b, int[] c) {
+// for (i...) {
+// a[i] = a[i] + 5;
+// b[i] = b[i] + c[i];
+// }
+// }
+//
+// Note: since the instruction does not involve the base array address in its
+// computations, it has no side effects.
+class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
+ public:
+ HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
+ : HExpression(DataType::Type::kInt32, SideEffects::None(), dex_pc) {
+ SetRawInputAt(0, index);
+ SetRawInputAt(1, shift);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+ bool IsActualObject() const OVERRIDE { return false; }
+
+ HInstruction* GetIndex() const { return InputAt(0); }
+ HInstruction* GetShift() const { return InputAt(1); }
+
+ DECLARE_INSTRUCTION(IntermediateArrayAddressIndex);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HIntermediateArrayAddressIndex);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
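
For the loop in the comment above, a plain C++ illustration of the sharing that HIntermediateArrayAddressIndex enables; this lowering is hypothetical and CONST_OFFSET is omitted:

#include <cstdint>

void foo(int32_t* a, int32_t* b, int32_t* c, int n) {
  for (int i = 0; i < n; ++i) {
    // One shared value per iteration, as the node computes: index << ELEM_SHIFT.
    intptr_t t = static_cast<intptr_t>(i) << 2;  // ELEM_SHIFT = 2 for int32
    int32_t* pa = reinterpret_cast<int32_t*>(reinterpret_cast<char*>(a) + t);
    int32_t* pb = reinterpret_cast<int32_t*>(reinterpret_cast<char*>(b) + t);
    int32_t* pc = reinterpret_cast<int32_t*>(reinterpret_cast<char*>(c) + t);
    *pa = *pa + 5;    // accesses 1 and 2 (read, write) reuse t
    *pb = *pb + *pc;  // accesses 3 to 5 reuse t as well
  }
}
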
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index ada6177..b2180d9 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -23,37 +23,36 @@
namespace art {
+class NodeTest : public OptimizingUnitTest {};
+
/**
 * Test that removing an instruction from the graph removes it from user lists
* and environment lists.
*/
-TEST(Node, RemoveInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, RemoveInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- entry->AddInstruction(new (&allocator) HGoto());
+ entry->AddInstruction(new (GetAllocator()) HGoto());
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* first_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(first_block);
entry->AddSuccessor(first_block);
- HInstruction* null_check = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter, 0);
first_block->AddInstruction(null_check);
- first_block->AddInstruction(new (&allocator) HReturnVoid());
+ first_block->AddInstruction(new (GetAllocator()) HReturnVoid());
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit_block);
first_block->AddSuccessor(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ exit_block->AddInstruction(new (GetAllocator()) HExit());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, null_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, null_check);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -70,25 +69,22 @@
/**
* Test that inserting an instruction in the graph updates user lists.
*/
-TEST(Node, InsertInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, InsertInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* parameter2 = new (&allocator) HParameterValue(
+ HInstruction* parameter2 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_FALSE(parameter1->HasUses());
- HInstruction* to_insert = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* to_insert = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->InsertInstructionBefore(to_insert, parameter2);
ASSERT_TRUE(parameter1->HasUses());
@@ -98,48 +94,42 @@
/**
* Test that adding an instruction in the graph updates user lists.
*/
-TEST(Node, AddInstruction) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, AddInstruction) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
- HInstruction* to_add = new (&allocator) HNullCheck(parameter, 0);
+ HInstruction* to_add = new (GetAllocator()) HNullCheck(parameter, 0);
entry->AddInstruction(to_add);
ASSERT_TRUE(parameter->HasUses());
ASSERT_TRUE(parameter->GetUses().HasExactlyOneElement());
}
-TEST(Node, ParentEnvironment) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+TEST_F(NodeTest, ParentEnvironment) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(
+ HInstruction* parameter1 = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
+ HInstruction* with_environment = new (GetAllocator()) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
- entry->AddInstruction(new (&allocator) HExit());
+ entry->AddInstruction(new (GetAllocator()) HExit());
ASSERT_TRUE(parameter1->HasUses());
ASSERT_TRUE(parameter1->GetUses().HasExactlyOneElement());
- HEnvironment* environment = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, with_environment);
- ArenaVector<HInstruction*> array(allocator.Adapter());
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, with_environment);
+ ArenaVector<HInstruction*> array(GetAllocator()->Adapter());
array.push_back(parameter1);
environment->CopyFrom(array);
@@ -148,22 +138,22 @@
ASSERT_TRUE(parameter1->HasEnvironmentUses());
ASSERT_TRUE(parameter1->GetEnvUses().HasExactlyOneElement());
- HEnvironment* parent1 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent1 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent1->CopyFrom(array);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
- HEnvironment* parent2 = new (&allocator) HEnvironment(
- &allocator, 1, graph->GetArtMethod(), 0, nullptr);
+ HEnvironment* parent2 = new (GetAllocator()) HEnvironment(
+ GetAllocator(), 1, graph->GetArtMethod(), 0, nullptr);
parent2->CopyFrom(array);
- parent1->SetAndCopyParentChain(&allocator, parent2);
+ parent1->SetAndCopyParentChain(GetAllocator(), parent2);
// One use for parent2, and one other use for the new parent of parent1.
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 4u);
// We have copied the parent chain. So we now have two more uses.
- environment->SetAndCopyParentChain(&allocator, parent1);
+ environment->SetAndCopyParentChain(GetAllocator(), parent1);
ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 6u);
}
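
The rewritten tests delegate arena management to an OptimizingUnitTest base class that this diff does not show. A self-contained gtest analogue of the pattern; the Pool/Allocator stand-ins and the member layout are assumptions:

#include <gtest/gtest.h>

// Stand-ins for ArenaPool/ArenaAllocator, only so the sketch compiles alone.
struct Pool {};
struct Allocator {
  explicit Allocator(Pool* pool) : pool_(pool) {}
  Pool* pool_;
};

// The fixture owns the pool and the allocator, so each TEST_F body can call
// GetAllocator() instead of declaring both as locals, which is exactly the
// boilerplate the diff above deletes from every test.
class NodeTestSketch : public ::testing::Test {
 protected:
  Allocator* GetAllocator() { return &allocator_; }

 private:
  Pool pool_;
  Allocator allocator_{&pool_};
};

TEST_F(NodeTestSketch, AllocatorOutlivesTestBody) {
  EXPECT_NE(nullptr, GetAllocator());
}
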
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 0aac260..781a59f 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -34,7 +34,7 @@
DCHECK(IsPowerOfTwo(base));
}
- // Returns true if memory is "at least" aligned at the given boundary.
+ // Returns true if memory is at least aligned at the given boundary.
   // Assumes the requested base is a power of two.
bool IsAlignedAt(size_t base) const {
DCHECK_NE(0u, base);
@@ -42,6 +42,10 @@
return ((offset_ | base_) & (base - 1u)) == 0;
}
+ size_t Base() const { return base_; }
+
+ size_t Offset() const { return offset_; }
+
std::string ToString() const {
return "ALIGN(" + std::to_string(base_) + "," + std::to_string(offset_) + ")";
}
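
A standalone sketch of the predicate behind IsAlignedAt(): with base_ a power of two, an address known to be base_-aligned plus offset_ bytes is aligned at `base` exactly when base divides both base_ and offset_, which one mask test checks. The struct below is a stand-in, not the ART class:

#include <cassert>
#include <cstddef>

struct Alignment {
  size_t base_;    // guaranteed alignment of the underlying address
  size_t offset_;  // known byte offset from that address
  bool IsAlignedAt(size_t base) const {
    // base is a power of two, so (x & (base - 1)) == 0 iff base divides x.
    return ((offset_ | base_) & (base - 1u)) == 0;
  }
};

int main() {
  assert(Alignment{16, 0}.IsAlignedAt(16));   // 16-aligned, no offset
  assert(Alignment{16, 4}.IsAlignedAt(4));    // offset 4 preserves 4-alignment
  assert(!Alignment{16, 4}.IsAlignedAt(8));   // ...but breaks 8-alignment
  assert(!Alignment{16, 0}.IsAlignedAt(32));  // never more than base_ promises
}
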
@@ -67,7 +71,7 @@
// TODO: we could introduce SIMD types in HIR.
static constexpr DataType::Type kSIMDType = DataType::Type::kFloat64;
- HVecOperation(ArenaAllocator* arena,
+ HVecOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
@@ -75,7 +79,7 @@
uint32_t dex_pc)
: HVariableInputSizeInstruction(side_effects,
dex_pc,
- arena,
+ allocator,
number_of_inputs,
kArenaAllocVectorNode),
vector_length_(vector_length) {
@@ -116,6 +120,22 @@
return GetVectorLength() == o->GetVectorLength() && GetPackedType() == o->GetPackedType();
}
+ // Maps an integral type to the same-size signed type and leaves other types alone.
+ // Can be used to test relaxed type consistency in which packed same-size integral
+ // types can co-exist, but other type mixes are an error.
+ static DataType::Type ToSignedType(DataType::Type type) {
+ switch (type) {
+ case DataType::Type::kBool: // 1-byte storage unit
+ case DataType::Type::kUint8:
+ return DataType::Type::kInt8;
+ case DataType::Type::kUint16:
+ return DataType::Type::kInt16;
+ default:
+ DCHECK(type != DataType::Type::kVoid && type != DataType::Type::kReference) << type;
+ return type;
+ }
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(VecOperation);
protected:
@@ -136,12 +156,12 @@
// Abstraction of a unary vector operation.
class HVecUnaryOperation : public HVecOperation {
public:
- HVecUnaryOperation(ArenaAllocator* arena,
+ HVecUnaryOperation(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 1,
@@ -161,13 +181,13 @@
// Abstraction of a binary vector operation.
class HVecBinaryOperation : public HVecOperation {
public:
- HVecBinaryOperation(ArenaAllocator* arena,
+ HVecBinaryOperation(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena,
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 2,
@@ -187,17 +207,21 @@
};
// Abstraction of a vector operation that references memory, with an alignment.
-// The Android runtime guarantees at least "component size" alignment for array
-// elements and, thus, vectors.
+// The Android runtime guarantees elements have at least natural alignment.
class HVecMemoryOperation : public HVecOperation {
public:
- HVecMemoryOperation(ArenaAllocator* arena,
+ HVecMemoryOperation(ArenaAllocator* allocator,
DataType::Type packed_type,
SideEffects side_effects,
size_t number_of_inputs,
size_t vector_length,
uint32_t dex_pc)
- : HVecOperation(arena, packed_type, side_effects, number_of_inputs, vector_length, dex_pc),
+ : HVecOperation(allocator,
+ packed_type,
+ side_effects,
+ number_of_inputs,
+ vector_length,
+ dex_pc),
alignment_(DataType::Size(packed_type), 0) {
DCHECK_GE(number_of_inputs, 2u);
}
@@ -230,18 +254,7 @@
}
DCHECK(input->IsVecOperation());
DataType::Type input_type = input->AsVecOperation()->GetPackedType();
- switch (input_type) {
- case DataType::Type::kBool:
- case DataType::Type::kInt8:
- return type == DataType::Type::kBool ||
- type == DataType::Type::kInt8;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- return type == DataType::Type::kUint16 ||
- type == DataType::Type::kInt16;
- default:
- return type == input_type;
- }
+ return HVecOperation::ToSignedType(input_type) == HVecOperation::ToSignedType(type);
}
//
@@ -252,12 +265,12 @@
// viz. replicate(x) = [ x, .. , x ].
class HVecReplicateScalar FINAL : public HVecUnaryOperation {
public:
- HVecReplicateScalar(ArenaAllocator* arena,
+ HVecReplicateScalar(ArenaAllocator* allocator,
HInstruction* scalar,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, scalar, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, scalar, packed_type, vector_length, dex_pc) {
DCHECK(!scalar->IsVecOperation());
}
@@ -277,13 +290,13 @@
// TODO: for now only i == 1 case supported.
class HVecExtractScalar FINAL : public HVecUnaryOperation {
public:
- HVecExtractScalar(ArenaAllocator* arena,
+ HVecExtractScalar(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
size_t index,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
DCHECK_LT(index, vector_length);
DCHECK_EQ(index, 0u);
@@ -315,13 +328,13 @@
kMax = 3
};
- HVecReduce(ArenaAllocator* arena,
+ HVecReduce(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
ReductionKind kind,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc),
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc),
kind_(kind) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -348,12 +361,12 @@
// viz. cnv[ x1, .. , xn ] = [ cnv(x1), .. , cnv(xn) ].
class HVecCnv FINAL : public HVecUnaryOperation {
public:
- HVecCnv(ArenaAllocator* arena,
+ HVecCnv(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
DCHECK_NE(GetInputType(), GetResultType()); // actual convert
}
@@ -373,12 +386,12 @@
// viz. neg[ x1, .. , xn ] = [ -x1, .. , -xn ].
class HVecNeg FINAL : public HVecUnaryOperation {
public:
- HVecNeg(ArenaAllocator* arena,
+ HVecNeg(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -391,15 +404,16 @@
};
// Takes absolute value of every component in the vector,
-// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ].
+// viz. abs[ x1, .. , xn ] = [ |x1|, .. , |xn| ]
+// for signed operand x.
class HVecAbs FINAL : public HVecUnaryOperation {
public:
- HVecAbs(ArenaAllocator* arena,
+ HVecAbs(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
@@ -416,12 +430,12 @@
// not[ x1, .. , xn ] = [ !x1, .. , !xn ] for boolean.
class HVecNot FINAL : public HVecUnaryOperation {
public:
- HVecNot(ArenaAllocator* arena,
+ HVecNot(ArenaAllocator* allocator,
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecUnaryOperation(arena, input, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecUnaryOperation(allocator, input, packed_type, vector_length, dex_pc) {
DCHECK(input->IsVecOperation());
}
@@ -441,13 +455,13 @@
// viz. [ x1, .. , xn ] + [ y1, .. , yn ] = [ x1 + y1, .. , xn + yn ].
class HVecAdd FINAL : public HVecBinaryOperation {
public:
- HVecAdd(ArenaAllocator* arena,
+ HVecAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -463,18 +477,23 @@
// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
-// for signed operands x, y (sign extension) or unsigned operands x, y (zero extension).
+// for either both signed or both unsigned operands x, y.
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
- HVecHalvingAdd(ArenaAllocator* arena,
+ HVecHalvingAdd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
bool is_rounded,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ bool is_unsigned,
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
+    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64 packed types.
+ // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
+ DCHECK(!is_unsigned ||
+ packed_type == DataType::Type::kInt32 ||
+ packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
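
Scalar reference semantics for the halving add described above; widening to 64 bits keeps the intermediate sum exact, mirroring the per-component formulas:

#include <cassert>
#include <cstdint>

int32_t HAddTruncated(int32_t x, int32_t y) {
  return static_cast<int32_t>((static_cast<int64_t>(x) + y) >> 1);
}

int32_t HAddRounded(int32_t x, int32_t y) {
  return static_cast<int32_t>((static_cast<int64_t>(x) + y + 1) >> 1);
}

int main() {
  assert(HAddTruncated(3, 4) == 3);  // (3 + 4) >> 1
  assert(HAddRounded(3, 4) == 4);    // (3 + 4 + 1) >> 1
}
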
@@ -510,13 +529,13 @@
// viz. [ x1, .. , xn ] - [ y1, .. , yn ] = [ x1 - y1, .. , xn - yn ].
class HVecSub FINAL : public HVecBinaryOperation {
public:
- HVecSub(ArenaAllocator* arena,
+ HVecSub(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -533,13 +552,13 @@
// viz. [ x1, .. , xn ] * [ y1, .. , yn ] = [ x1 * y1, .. , xn * yn ].
class HVecMul FINAL : public HVecBinaryOperation {
public:
- HVecMul(ArenaAllocator* arena,
+ HVecMul(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -556,13 +575,13 @@
// viz. [ x1, .. , xn ] / [ y1, .. , yn ] = [ x1 / y1, .. , xn / yn ].
class HVecDiv FINAL : public HVecBinaryOperation {
public:
- HVecDiv(ArenaAllocator* arena,
+ HVecDiv(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
}
@@ -576,17 +595,23 @@
};
// Takes minimum of every component in the two vectors,
-// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ].
+// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
+// for either both signed or both unsigned operands x, y.
class HVecMin FINAL : public HVecBinaryOperation {
public:
- HVecMin(ArenaAllocator* arena,
+ HVecMin(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
+    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64 packed types.
+ // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
+ DCHECK(!is_unsigned ||
+ packed_type == DataType::Type::kInt32 ||
+ packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
@@ -614,17 +639,23 @@
};
// Takes maximum of every component in the two vectors,
-// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ].
+// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
+// for either both signed or both unsigned operands x, y.
class HVecMax FINAL : public HVecBinaryOperation {
public:
- HVecMax(ArenaAllocator* arena,
+ HVecMax(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
bool is_unsigned,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
+    // The `is_unsigned` flag should be used exclusively with the Int32 or Int64 packed types.
+ // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
+ DCHECK(!is_unsigned ||
+ packed_type == DataType::Type::kInt32 ||
+ packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
@@ -655,13 +686,13 @@
// viz. [ x1, .. , xn ] & [ y1, .. , yn ] = [ x1 & y1, .. , xn & yn ].
class HVecAnd FINAL : public HVecBinaryOperation {
public:
- HVecAnd(ArenaAllocator* arena,
+ HVecAnd(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -677,13 +708,13 @@
// viz. [ x1, .. , xn ] and-not [ y1, .. , yn ] = [ ~x1 & y1, .. , ~xn & yn ].
class HVecAndNot FINAL : public HVecBinaryOperation {
public:
- HVecAndNot(ArenaAllocator* arena,
+ HVecAndNot(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -699,13 +730,13 @@
// viz. [ x1, .. , xn ] | [ y1, .. , yn ] = [ x1 | y1, .. , xn | yn ].
class HVecOr FINAL : public HVecBinaryOperation {
public:
- HVecOr(ArenaAllocator* arena,
+ HVecOr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -721,13 +752,13 @@
// viz. [ x1, .. , xn ] ^ [ y1, .. , yn ] = [ x1 ^ y1, .. , xn ^ yn ].
class HVecXor FINAL : public HVecBinaryOperation {
public:
- HVecXor(ArenaAllocator* arena,
+ HVecXor(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(left->IsVecOperation() && right->IsVecOperation());
}
@@ -743,13 +774,13 @@
// viz. [ x1, .. , xn ] << d = [ x1 << d, .. , xn << d ].
class HVecShl FINAL : public HVecBinaryOperation {
public:
- HVecShl(ArenaAllocator* arena,
+ HVecShl(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -765,13 +796,13 @@
// viz. [ x1, .. , xn ] >> d = [ x1 >> d, .. , xn >> d ].
class HVecShr FINAL : public HVecBinaryOperation {
public:
- HVecShr(ArenaAllocator* arena,
+ HVecShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -787,13 +818,13 @@
// viz. [ x1, .. , xn ] >>> d = [ x1 >>> d, .. , xn >>> d ].
class HVecUShr FINAL : public HVecBinaryOperation {
public:
- HVecUShr(ArenaAllocator* arena,
+ HVecUShr(ArenaAllocator* allocator,
HInstruction* left,
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecBinaryOperation(arena, left, right, packed_type, vector_length, dex_pc) {
+ uint32_t dex_pc)
+ : HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
DCHECK(HasConsistentPackedTypes(left, packed_type));
}
@@ -814,13 +845,13 @@
// set( array(x1, .. , xm) ) = [ x1, .. , xm, 0, .. , 0 ] if m < n.
class HVecSetScalars FINAL : public HVecOperation {
public:
- HVecSetScalars(ArenaAllocator* arena,
+ HVecSetScalars(ArenaAllocator* allocator,
HInstruction* scalars[],
DataType::Type packed_type,
size_t vector_length,
size_t number_of_scalars,
- uint32_t dex_pc = kNoDexPc)
- : HVecOperation(arena,
+ uint32_t dex_pc)
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
number_of_scalars,
@@ -846,15 +877,15 @@
// viz. [ a1, .. , an ] + [ x1, .. , xn ] * [ y1, .. , yn ] = [ a1 + x1 * y1, .. , an + xn * yn ].
class HVecMultiplyAccumulate FINAL : public HVecOperation {
public:
- HVecMultiplyAccumulate(ArenaAllocator* arena,
+ HVecMultiplyAccumulate(ArenaAllocator* allocator,
InstructionKind op,
HInstruction* accumulator,
HInstruction* mul_left,
HInstruction* mul_right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecOperation(arena,
+ uint32_t dex_pc)
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -891,19 +922,19 @@
// Takes the absolute difference of two vectors, and adds the results to
// same-precision or wider-precision components in the accumulator,
-// viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ] =
+// viz. SAD([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
// [ a1 + sum abs(xi-yi), .. , am + sum abs(xj-yj) ],
-// for m <= n and non-overlapping sums.
+// for m <= n, non-overlapping sums, and signed operands x, y.
class HVecSADAccumulate FINAL : public HVecOperation {
public:
- HVecSADAccumulate(ArenaAllocator* arena,
+ HVecSADAccumulate(ArenaAllocator* allocator,
HInstruction* accumulator,
HInstruction* sad_left,
HInstruction* sad_right,
DataType::Type packed_type,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecOperation(arena,
+ uint32_t dex_pc)
+ : HVecOperation(allocator,
packed_type,
SideEffects::None(),
/* number_of_inputs */ 3,
@@ -929,16 +960,17 @@
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad FINAL : public HVecMemoryOperation {
public:
- HVecLoad(ArenaAllocator* arena,
+ HVecLoad(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
DataType::Type packed_type,
+ SideEffects side_effects,
size_t vector_length,
bool is_string_char_at,
- uint32_t dex_pc = kNoDexPc)
- : HVecMemoryOperation(arena,
+ uint32_t dex_pc)
+ : HVecMemoryOperation(allocator,
packed_type,
- SideEffects::ArrayReadOfType(packed_type),
+ side_effects,
/* number_of_inputs */ 2,
vector_length,
dex_pc) {
@@ -972,16 +1004,17 @@
// sets mem(1) = x1, .. , mem(n) = xn.
class HVecStore FINAL : public HVecMemoryOperation {
public:
- HVecStore(ArenaAllocator* arena,
+ HVecStore(ArenaAllocator* allocator,
HInstruction* base,
HInstruction* index,
HInstruction* value,
DataType::Type packed_type,
+ SideEffects side_effects,
size_t vector_length,
- uint32_t dex_pc = kNoDexPc)
- : HVecMemoryOperation(arena,
+ uint32_t dex_pc)
+ : HVecMemoryOperation(allocator,
packed_type,
- SideEffects::ArrayWriteOfType(packed_type),
+ side_effects,
/* number_of_inputs */ 3,
vector_length,
dex_pc) {
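
The HasConsistentPackedTypes() rewrite above folds the old per-size switch into ToSignedType(). A self-contained sketch of that relaxed rule, with a plain enum standing in for DataType::Type:

#include <cassert>

enum class Type { kBool, kUint8, kInt8, kUint16, kInt16, kInt32 };

// Mirrors HVecOperation::ToSignedType: fold same-size integral types onto
// their signed variant, so Uint8/Int8 (and Uint16/Int16) vectors compare as
// type-consistent while different-size mixes do not.
Type ToSignedType(Type type) {
  switch (type) {
    case Type::kBool:  // 1-byte storage unit
    case Type::kUint8:
      return Type::kInt8;
    case Type::kUint16:
      return Type::kInt16;
    default:
      return type;
  }
}

bool HasConsistentPackedTypes(Type input, Type required) {
  return ToSignedType(input) == ToSignedType(required);
}

int main() {
  assert(HasConsistentPackedTypes(Type::kUint8, Type::kInt8));    // same size
  assert(HasConsistentPackedTypes(Type::kUint16, Type::kInt16));  // same size
  assert(!HasConsistentPackedTypes(Type::kInt8, Type::kInt16));   // size mix
}
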
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index 3acdb20..ab9d759 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -23,12 +23,10 @@
/**
* Fixture class for testing vector nodes.
*/
-class NodesVectorTest : public CommonCompilerTest {
+class NodesVectorTest : public OptimizingUnitTest {
public:
NodesVectorTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)) {
+ : graph_(CreateGraph()) {
BuildGraph();
}
@@ -36,28 +34,38 @@
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- entry_block_ = new (&allocator_) HBasicBlock(graph_);
- exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_block_);
graph_->AddBlock(exit_block_);
graph_->SetEntryBlock(entry_block_);
graph_->SetExitBlock(exit_block_);
- parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
- entry_block_->AddInstruction(parameter_);
+ int8_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 0,
+ DataType::Type::kInt8);
+ entry_block_->AddInstruction(int8_parameter_);
+ int16_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(2),
+ 0,
+ DataType::Type::kInt16);
+ entry_block_->AddInstruction(int16_parameter_);
+ int32_parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
+ entry_block_->AddInstruction(int32_parameter_);
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
- HInstruction* parameter_;
+ HInstruction* int8_parameter_;
+ HInstruction* int16_parameter_;
+ HInstruction* int32_parameter_;
};
//
@@ -92,6 +100,10 @@
EXPECT_FALSE(Alignment(16, 1).IsAlignedAt(16));
EXPECT_FALSE(Alignment(16, 7).IsAlignedAt(16));
EXPECT_FALSE(Alignment(16, 0).IsAlignedAt(32));
+
+ EXPECT_EQ(16u, Alignment(16, 0).Base());
+ EXPECT_EQ(0u, Alignment(16, 0).Offset());
+ EXPECT_EQ(4u, Alignment(16, 4).Offset());
}
TEST(NodesVector, AlignmentEQ) {
@@ -118,16 +130,23 @@
}
TEST_F(NodesVectorTest, VectorOperationProperties) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
- HVecOperation* v1 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
- HVecOperation* v2 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 2);
- HVecOperation* v3 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt16, 4);
- HVecOperation* v4 = new (&allocator_)
- HVecStore(&allocator_, parameter_, parameter_, v0, DataType::Type::kInt32, 4);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* v2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 2, kNoDexPc);
+ HVecOperation* v3 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecOperation* v4 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ v0,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
EXPECT_TRUE(v0->Equals(v0));
EXPECT_TRUE(v1->Equals(v1));
@@ -175,12 +194,30 @@
}
TEST_F(NodesVectorTest, VectorAlignmentAndStringCharAtMatterOnLoad) {
- HVecLoad* v0 = new (&allocator_) HVecLoad(
- &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ false);
- HVecLoad* v1 = new (&allocator_) HVecLoad(
- &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ false);
- HVecLoad* v2 = new (&allocator_) HVecLoad(
- &allocator_, parameter_, parameter_, DataType::Type::kInt32, 4, /*is_string_char_at*/ true);
+ HVecLoad* v0 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v1 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ false,
+ kNoDexPc);
+ HVecLoad* v2 = new (GetAllocator()) HVecLoad(GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ DataType::Type::kInt32,
+ SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+ 4,
+ /*is_string_char_at*/ true,
+ kNoDexPc);
EXPECT_TRUE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -195,7 +232,7 @@
EXPECT_TRUE(v2->Equals(v2));
EXPECT_TRUE(v0->Equals(v1));
- EXPECT_FALSE(v0->Equals(v2));
+ EXPECT_FALSE(v0->Equals(v2)); // different is_string_char_at
EXPECT_TRUE(v0->GetAlignment() == Alignment(4, 0));
EXPECT_TRUE(v1->GetAlignment() == Alignment(4, 0));
@@ -208,112 +245,226 @@
EXPECT_FALSE(v0->Equals(v1)); // no longer equal
}
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
-
- HVecMin* v1 = new (&allocator_)
- HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true);
- HVecMin* v2 = new (&allocator_)
- HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false);
- HVecMin* v3 = new (&allocator_)
- HVecMin(&allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true);
+TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecStore* v0 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ p0,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
+ HVecStore* v1 = new (GetAllocator()) HVecStore(
+ GetAllocator(),
+ int32_parameter_,
+ int32_parameter_,
+ p0,
+ DataType::Type::kInt32,
+ SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+ 4,
+ kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
- EXPECT_TRUE(v1->CanBeMoved());
- EXPECT_TRUE(v2->CanBeMoved());
- EXPECT_TRUE(v3->CanBeMoved());
+ EXPECT_FALSE(v1->CanBeMoved());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_FALSE(v2->IsUnsigned());
- EXPECT_TRUE(v3->IsUnsigned());
+ EXPECT_TRUE(v0->Equals(v1));
- EXPECT_TRUE(v1->Equals(v1));
- EXPECT_TRUE(v2->Equals(v2));
- EXPECT_TRUE(v3->Equals(v3));
+ EXPECT_TRUE(v0->GetAlignment() == Alignment(4, 0));
+ EXPECT_TRUE(v1->GetAlignment() == Alignment(4, 0));
- EXPECT_FALSE(v1->Equals(v2)); // different signs
- EXPECT_FALSE(v1->Equals(v3)); // different vector lengths
+ v1->SetAlignment(Alignment(8, 0));
+
+ EXPECT_TRUE(v1->GetAlignment() == Alignment(8, 0));
+
+ EXPECT_FALSE(v0->Equals(v1)); // no longer equal
+}
+
+TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+
+ HVecMin* v0 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v1 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v2 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMin* v3 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v4 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v5 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* v6 = new (GetAllocator()) HVecMin(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
+
+ EXPECT_FALSE(p0->CanBeMoved());
+ EXPECT_FALSE(p1->CanBeMoved());
+ EXPECT_FALSE(p2->CanBeMoved());
+
+ for (HVecMin* min_insn : min_insns) {
+ EXPECT_TRUE(min_insn->CanBeMoved());
+ }
+
+ // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
+ EXPECT_TRUE(v0->IsUnsigned());
+ EXPECT_FALSE(v1->IsUnsigned());
+ EXPECT_TRUE(v2->IsUnsigned());
+
+ for (HVecMin* min_insn1 : min_insns) {
+ for (HVecMin* min_insn2 : min_insns) {
+ EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
+ }
+ }
}
TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecMax* v1 = new (&allocator_)
- HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true);
- HVecMax* v2 = new (&allocator_)
- HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false);
- HVecMax* v3 = new (&allocator_)
- HVecMax(&allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true);
+ HVecMax* v0 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v1 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v2 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
+ HVecMax* v3 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v4 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v5 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* v6 = new (GetAllocator()) HVecMax(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
+ HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
- EXPECT_FALSE(v0->CanBeMoved());
- EXPECT_TRUE(v1->CanBeMoved());
- EXPECT_TRUE(v2->CanBeMoved());
- EXPECT_TRUE(v3->CanBeMoved());
+ EXPECT_FALSE(p0->CanBeMoved());
+ EXPECT_FALSE(p1->CanBeMoved());
+ EXPECT_FALSE(p2->CanBeMoved());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_FALSE(v2->IsUnsigned());
- EXPECT_TRUE(v3->IsUnsigned());
+ for (HVecMax* max_insn : max_insns) {
+ EXPECT_TRUE(max_insn->CanBeMoved());
+ }
- EXPECT_TRUE(v1->Equals(v1));
- EXPECT_TRUE(v2->Equals(v2));
- EXPECT_TRUE(v3->Equals(v3));
+ // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
+ EXPECT_TRUE(v0->IsUnsigned());
+ EXPECT_FALSE(v1->IsUnsigned());
+ EXPECT_TRUE(v2->IsUnsigned());
- EXPECT_FALSE(v1->Equals(v2)); // different signs
- EXPECT_FALSE(v1->Equals(v3)); // different vector lengths
+ for (HVecMax* max_insn1 : max_insns) {
+ for (HVecMax* max_insn2 : max_insns) {
+ EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
+ }
+ }
}
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
+ HVecOperation* p0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecOperation* p1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecOperation* p2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
- HVecHalvingAdd* v1 = new (&allocator_) HVecHalvingAdd(
- &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, /*is_rounded*/ true);
- HVecHalvingAdd* v2 = new (&allocator_) HVecHalvingAdd(
- &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, /*is_rounded*/ false);
- HVecHalvingAdd* v3 = new (&allocator_) HVecHalvingAdd(
- &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, /*is_rounded*/ true);
- HVecHalvingAdd* v4 = new (&allocator_) HVecHalvingAdd(
- &allocator_, v0, v0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, /*is_rounded*/ false);
- HVecHalvingAdd* v5 = new (&allocator_) HVecHalvingAdd(
- &allocator_, v0, v0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, /*is_rounded*/ true);
+ HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
+ /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
+ /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+ HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
+ /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
+ /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
+ /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
+ /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
+ /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
+ /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
+ /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
+ /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
+ /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
+ /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
+ GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
+ /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
- EXPECT_FALSE(v0->CanBeMoved());
- EXPECT_TRUE(v1->CanBeMoved());
- EXPECT_TRUE(v2->CanBeMoved());
- EXPECT_TRUE(v3->CanBeMoved());
- EXPECT_TRUE(v4->CanBeMoved());
- EXPECT_TRUE(v5->CanBeMoved());
+ EXPECT_FALSE(p0->CanBeMoved());
+ EXPECT_FALSE(p1->CanBeMoved());
+ EXPECT_FALSE(p2->CanBeMoved());
- EXPECT_TRUE(v1->Equals(v1));
- EXPECT_TRUE(v2->Equals(v2));
- EXPECT_TRUE(v3->Equals(v3));
- EXPECT_TRUE(v4->Equals(v4));
- EXPECT_TRUE(v5->Equals(v5));
+ for (HVecHalvingAdd* hadd_insn : hadd_insns) {
+ EXPECT_TRUE(hadd_insn->CanBeMoved());
+ }
- EXPECT_TRUE(v1->IsUnsigned() && v1->IsRounded());
- EXPECT_TRUE(v2->IsUnsigned() && !v2->IsRounded());
- EXPECT_TRUE(!v3->IsUnsigned() && v3->IsRounded());
- EXPECT_TRUE(!v4->IsUnsigned() && !v4->IsRounded());
- EXPECT_TRUE(v5->IsUnsigned() && v5->IsRounded());
+ // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
+ EXPECT_TRUE(v0->IsUnsigned());
+ EXPECT_TRUE(v1->IsUnsigned());
+ EXPECT_TRUE(!v2->IsUnsigned());
+ EXPECT_TRUE(!v3->IsUnsigned());
+ EXPECT_TRUE(v4->IsUnsigned());
- EXPECT_FALSE(v1->Equals(v2)); // different attributes
- EXPECT_FALSE(v1->Equals(v3)); // different attributes
- EXPECT_FALSE(v1->Equals(v4)); // different attributes
- EXPECT_FALSE(v1->Equals(v5)); // different vector lengths
+ EXPECT_TRUE(v0->IsRounded());
+ EXPECT_TRUE(!v1->IsRounded());
+ EXPECT_TRUE(v2->IsRounded());
+ EXPECT_TRUE(!v3->IsRounded());
+ EXPECT_TRUE(v4->IsRounded());
+ EXPECT_TRUE(v5->IsRounded());
+ EXPECT_TRUE(!v6->IsRounded());
+ EXPECT_TRUE(v7->IsRounded());
+ EXPECT_TRUE(!v8->IsRounded());
+ EXPECT_TRUE(v9->IsRounded());
+ EXPECT_TRUE(!v10->IsRounded());
+ EXPECT_TRUE(v11->IsRounded());
+ EXPECT_TRUE(!v12->IsRounded());
+
+ for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
+ for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
+ EXPECT_EQ(hadd_insn1 == hadd_insn2, hadd_insn1->Equals(hadd_insn2));
+ }
+ }
}
TEST_F(NodesVectorTest, VectorOperationMattersOnMultiplyAccumulate) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecMultiplyAccumulate* v1 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4);
- HVecMultiplyAccumulate* v2 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4);
- HVecMultiplyAccumulate* v3 = new (&allocator_) HVecMultiplyAccumulate(
- &allocator_, HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2);
+ HVecMultiplyAccumulate* v1 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v2 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kSub, v0, v0, v0, DataType::Type::kInt32, 4, kNoDexPc);
+ HVecMultiplyAccumulate* v3 = new (GetAllocator()) HVecMultiplyAccumulate(
+ GetAllocator(), HInstruction::kAdd, v0, v0, v0, DataType::Type::kInt32, 2, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
@@ -333,15 +484,15 @@
}
TEST_F(NodesVectorTest, VectorKindMattersOnReduce) {
- HVecOperation* v0 = new (&allocator_)
- HVecReplicateScalar(&allocator_, parameter_, DataType::Type::kInt32, 4);
+ HVecOperation* v0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecReduce* v1 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kSum);
- HVecReduce* v2 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMin);
- HVecReduce* v3 = new (&allocator_) HVecReduce(
- &allocator_, v0, DataType::Type::kInt32, 4, HVecReduce::kMax);
+ HVecReduce* v1 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kSum, kNoDexPc);
+ HVecReduce* v2 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMin, kNoDexPc);
+ HVecReduce* v3 = new (GetAllocator()) HVecReduce(
+ GetAllocator(), v0, DataType::Type::kInt32, 4, HVecReduce::kMax, kNoDexPc);
EXPECT_FALSE(v0->CanBeMoved());
EXPECT_TRUE(v1->CanBeMoved());
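
The rewritten vector tests replace hand-picked Equals() comparisons with an all-pairs loop asserting that each instruction equals exactly itself. A generic sketch of that idiom; it assumes only that T has an Equals(const T*) method:

#include <cstddef>

#include <gtest/gtest.h>

template <typename T, size_t N>
void ExpectEqualsIsIdentity(T* (&insns)[N]) {
  for (T* a : insns) {
    for (T* b : insns) {
      // Pointer identity and semantic equality must coincide across the set.
      EXPECT_EQ(a == b, a->Equals(b));
    }
  }
}
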
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 99d5284..bd65cbf 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -46,19 +46,20 @@
static constexpr bool kGenerateExpected = false;
OptimizingCFITest()
- : pool_(),
- allocator_(&pool_),
+ : pool_and_allocator_(),
opts_(),
isa_features_(),
graph_(nullptr),
code_gen_(),
- blocks_(allocator_.Adapter()) {}
+ blocks_(GetAllocator()->Adapter()) {}
+
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }
void SetUpFrame(InstructionSet isa) {
// Setup simple context.
std::string error;
isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
- graph_ = CreateGraph(&allocator_);
+ graph_ = CreateGraph(&pool_and_allocator_);
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
@@ -142,8 +143,7 @@
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
- ArenaPool pool_;
- ArenaAllocator allocator_;
+ ArenaPoolAndAllocator pool_and_allocator_;
CompilerOptions opts_;
std::unique_ptr<const InstructionSetFeatures> isa_features_;
HGraph* graph_;
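
The fixture above folds the separate pool_/allocator_ members into one ArenaPoolAndAllocator, which this diff does not define. A plausible self-contained sketch of such a bundle; only the class name and GetAllocator() come from the diff, the body is an assumption:

// Stand-ins, so the sketch compiles without the ART headers.
struct ArenaPool {};
struct ArenaAllocator {
  explicit ArenaAllocator(ArenaPool* pool) : pool_(pool) {}
  ArenaPool* pool_;
};

class ArenaPoolAndAllocator {
 public:
  ArenaPoolAndAllocator() : allocator_(&pool_) {}
  ArenaAllocator* GetAllocator() { return &allocator_; }

 private:
  ArenaPool pool_;            // declared first: must outlive allocator_
  ArenaAllocator allocator_;  // destroyed before pool_ (reverse declaration order)
};
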
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1218586..9bfb7a5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -27,6 +27,7 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips
+#include "instruction_simplifier_mips.h"
#include "pc_relative_fixups_mips.h"
#endif
@@ -44,6 +45,7 @@
#include "base/dumpable.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
#include "builder.h"
@@ -107,8 +109,8 @@
*/
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
- explicit CodeVectorAllocator(ArenaAllocator* arena)
- : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+ explicit CodeVectorAllocator(ArenaAllocator* allocator)
+ : memory_(allocator->Adapter(kArenaAllocCodeBuffer)),
size_(0) {}
virtual uint8_t* Allocate(size_t size) {
@@ -147,7 +149,7 @@
cached_method_name_(),
timing_logger_enabled_(compiler_driver->GetDumpPasses()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
- disasm_info_(graph->GetArena()),
+ disasm_info_(graph->GetAllocator()),
visualizer_oss_(),
visualizer_output_(visualizer_output),
visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
@@ -350,7 +352,7 @@
private:
// Create a 'CompiledMethod' for an optimized graph.
- CompiledMethod* Emit(ArenaAllocator* arena,
+ CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* driver,
@@ -363,7 +365,8 @@
// 2) Transforms the graph to SSA. Returns null if it failed.
// 3) Runs optimizations on the graph, including register allocator.
// 4) Generates code with the `code_allocator` provided.
- CodeGenerator* TryCompile(ArenaAllocator* arena,
+ CodeGenerator* TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -451,7 +454,7 @@
static HOptimization* BuildOptimization(
const std::string& pass_name,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -464,76 +467,79 @@
std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
- return new (arena) BoundsCheckElimination(graph,
- *most_recent_side_effects,
- most_recent_induction);
+ return new (allocator) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
} else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
+ return new (allocator) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
} else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
- return new (arena) HConstantFolding(graph, pass_name.c_str());
+ return new (allocator) HConstantFolding(graph, pass_name.c_str());
} else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
- return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
+ return new (allocator) HDeadCodeElimination(graph, stats, pass_name.c_str());
} else if (opt_name == HInliner::kInlinerPassName) {
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- return new (arena) HInliner(graph, // outer_graph
- graph, // outermost_graph
- codegen,
- dex_compilation_unit, // outer_compilation_unit
- dex_compilation_unit, // outermost_compilation_unit
- driver,
- handles,
- stats,
- number_of_dex_registers,
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr);
+ return new (allocator) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
} else if (opt_name == HSharpening::kSharpeningPassName) {
- return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
+ return new (allocator) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
- return new (arena) HSelectGenerator(graph, handles, stats);
+ return new (allocator) HSelectGenerator(graph, handles, stats);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
- return new (arena) HInductionVarAnalysis(graph);
+ return new (allocator) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
+ return new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
- return new (arena) IntrinsicsRecognizer(graph, stats);
+ return new (allocator) IntrinsicsRecognizer(graph, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ return new (allocator) LICM(graph, *most_recent_side_effects, stats);
} else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
- return new (arena) LoadStoreAnalysis(graph);
+ return new (allocator) LoadStoreAnalysis(graph);
} else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
CHECK(most_recent_side_effects != nullptr);
CHECK(most_recent_lsa != nullptr);
- return
- new (arena) LoadStoreElimination(graph, *most_recent_side_effects, *most_recent_lsa, stats);
+ return new (allocator) LoadStoreElimination(graph,
+ *most_recent_side_effects,
+ *most_recent_lsa, stats);
} else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
- return new (arena) SideEffectsAnalysis(graph);
+ return new (allocator) SideEffectsAnalysis(graph);
} else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (arena) HLoopOptimization(graph, driver, most_recent_induction, stats);
+ return new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats);
} else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
- return new (arena) CHAGuardOptimization(graph);
+ return new (allocator) CHAGuardOptimization(graph);
} else if (opt_name == CodeSinking::kCodeSinkingPassName) {
- return new (arena) CodeSinking(graph, stats);
+ return new (allocator) CodeSinking(graph, stats);
} else if (opt_name == ConstructorFenceRedundancyElimination::kPassName) {
- return new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ return new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
#ifdef ART_ENABLE_CODEGEN_arm
} else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
- return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ return new (allocator) arm::InstructionSimplifierArm(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
} else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
- return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ return new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_mips
} else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
- return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName) {
+ return new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
#endif
#ifdef ART_ENABLE_CODEGEN_x86
} else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
- return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ return new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
} else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
- return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ return new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
#endif
}
return nullptr;
@@ -541,7 +547,7 @@
static ArenaVector<HOptimization*> BuildOptimizations(
const std::vector<std::string>& pass_names,
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
@@ -554,11 +560,11 @@
SideEffectsAnalysis* most_recent_side_effects = nullptr;
HInductionVarAnalysis* most_recent_induction = nullptr;
LoadStoreAnalysis* most_recent_lsa = nullptr;
- ArenaVector<HOptimization*> ret(arena->Adapter());
+ ArenaVector<HOptimization*> ret(allocator->Adapter());
for (const std::string& pass_name : pass_names) {
HOptimization* opt = BuildOptimization(
pass_name,
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -605,7 +611,7 @@
return;
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- HInliner* inliner = new (graph->GetArena()) HInliner(
+ HInliner* inliner = new (graph->GetAllocator()) HInliner(
graph, // outer_graph
graph, // outermost_graph
codegen,
@@ -628,17 +634,18 @@
PassObserver* pass_observer) const {
UNUSED(codegen); // To avoid compilation error when compiling for svelte
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
switch (instruction_set) {
#if defined(ART_ENABLE_CODEGEN_arm)
case kThumb2:
case kArm: {
arm::InstructionSimplifierArm* simplifier =
- new (arena) arm::InstructionSimplifierArm(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm::InstructionSimplifierArm(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set, codegen);
+ new (allocator) HInstructionScheduling(graph, instruction_set, codegen);
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -652,11 +659,12 @@
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
arm64::InstructionSimplifierArm64* simplifier =
- new (arena) arm64::InstructionSimplifierArm64(graph, stats);
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HInstructionScheduling* scheduling =
- new (arena) HInstructionScheduling(graph, instruction_set);
+ new (allocator) HInstructionScheduling(graph, instruction_set);
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -669,11 +677,15 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ mips::InstructionSimplifierMips* simplifier =
+ new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
mips::PcRelativeFixups* pc_relative_fixups =
- new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
HOptimization* mips_optimizations[] = {
+ simplifier,
side_effects,
gvn,
pc_relative_fixups,
@@ -684,8 +696,9 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* mips64_optimizations[] = {
side_effects,
gvn,
@@ -696,12 +709,13 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::PcRelativeFixups* pc_relative_fixups =
- new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
side_effects,
gvn,
@@ -714,10 +728,11 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
- SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
+ SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn =
+ new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
x86::X86MemoryOperandGeneration* memory_gen =
- new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
side_effects,
gvn,
@@ -743,14 +758,19 @@
pass_observer);
PrepareForRegisterAllocation(graph, stats).Run();
}
- SsaLivenessAnalysis liveness(graph, codegen);
+ // Use local allocator shared by SSA liveness analysis and register allocator.
+ // (Register allocator creates new objects in the liveness data.)
+ ScopedArenaAllocator local_allocator(graph->GetArenaStack());
+ SsaLivenessAnalysis liveness(graph, codegen, &local_allocator);
{
PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer);
liveness.Analyze();
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(&local_allocator, codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
}
}
@@ -761,11 +781,11 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* arena = graph->GetArena();
+ ArenaAllocator* allocator = graph->GetAllocator();
if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
ArenaVector<HOptimization*> optimizations = BuildOptimizations(
*driver->GetCompilerOptions().GetPassesToRun(),
- arena,
+ allocator,
graph,
stats,
codegen,
@@ -776,43 +796,45 @@
return;
}
- HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce1 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$initial");
- HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce2 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$after_inlining");
- HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
+ HDeadCodeElimination* dce3 = new (allocator) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
- HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(
+ HConstantFolding* fold1 = new (allocator) HConstantFolding(graph, "constant_folding");
+ InstructionSimplifier* simplify1 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats);
- HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, handles, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(
+ HSelectGenerator* select_generator = new (allocator) HSelectGenerator(graph, handles, stats);
+ HConstantFolding* fold2 = new (allocator) HConstantFolding(
graph, "constant_folding$after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects1 = new (arena) SideEffectsAnalysis(
+ HConstantFolding* fold3 = new (allocator) HConstantFolding(graph, "constant_folding$after_bce");
+ SideEffectsAnalysis* side_effects1 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_gvn");
- SideEffectsAnalysis* side_effects2 = new (arena) SideEffectsAnalysis(
+ SideEffectsAnalysis* side_effects2 = new (allocator) SideEffectsAnalysis(
graph, "side_effects$before_lse");
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects1);
- LICM* licm = new (arena) LICM(graph, *side_effects1, stats);
- HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects1, induction);
- HLoopOptimization* loop = new (arena) HLoopOptimization(graph, driver, induction, stats);
- LoadStoreAnalysis* lsa = new (arena) LoadStoreAnalysis(graph);
- LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
- HSharpening* sharpening = new (arena) HSharpening(
+ GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects1);
+ LICM* licm = new (allocator) LICM(graph, *side_effects1, stats);
+ HInductionVarAnalysis* induction = new (allocator) HInductionVarAnalysis(graph);
+ BoundsCheckElimination* bce =
+ new (allocator) BoundsCheckElimination(graph, *side_effects1, induction);
+ HLoopOptimization* loop = new (allocator) HLoopOptimization(graph, driver, induction, stats);
+ LoadStoreAnalysis* lsa = new (allocator) LoadStoreAnalysis(graph);
+ LoadStoreElimination* lse =
+ new (allocator) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
+ HSharpening* sharpening = new (allocator) HSharpening(
graph, codegen, dex_compilation_unit, driver, handles);
- InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify2 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
- InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify3 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$after_bce");
- InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify4 = new (allocator) InstructionSimplifier(
graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
- IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, stats);
- CHAGuardOptimization* cha_guard = new (arena) CHAGuardOptimization(graph);
- CodeSinking* code_sinking = new (arena) CodeSinking(graph, stats);
+ IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats);
+ CHAGuardOptimization* cha_guard = new (allocator) CHAGuardOptimization(graph);
+ CodeSinking* code_sinking = new (allocator) CodeSinking(graph, stats);
ConstructorFenceRedundancyElimination* cfre =
- new (arena) ConstructorFenceRedundancyElimination(graph, stats);
+ new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
HOptimization* optimizations1[] = {
intrinsics,
@@ -859,7 +881,7 @@
}
static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
- ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
+ ArenaVector<linker::LinkerPatch> linker_patches(codegen->GetGraph()->GetAllocator()->Adapter());
codegen->EmitLinkerPatches(&linker_patches);
// Sort patches by literal offset. Required for .oat_patches encoding.
@@ -871,14 +893,14 @@
return linker_patches;
}
-CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
+CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
CompilerDriver* compiler_driver,
const DexFile::CodeItem* code_item) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
- ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
- ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
+ ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
size_t stack_map_size = 0;
size_t method_info_size = 0;
codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
@@ -906,7 +928,8 @@
return compiled_method;
}
-CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
+CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
+ ArenaStack* arena_stack,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
@@ -964,8 +987,9 @@
/* verified_method */ nullptr,
dex_cache);
- HGraph* graph = new (arena) HGraph(
- arena,
+ HGraph* graph = new (allocator) HGraph(
+ allocator,
+ arena_stack,
dex_file,
method_idx,
compiler_driver->GetInstructionSet(),
@@ -1018,7 +1042,6 @@
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
- dex_cache,
handles);
GraphAnalysisResult result = builder.BuildGraph();
if (result != kAnalysisSuccess) {
@@ -1085,11 +1108,12 @@
DCHECK(Runtime::Current()->IsAotCompiler());
const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || verifier::CanCompilerHandleVerificationFailure(
- verified_method->GetEncounteredVerificationFailures())) {
- ArenaAllocator arena(Runtime::Current()->GetArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+ verifier::CanCompilerHandleVerificationFailure(
+ verified_method->GetEncounteredVerificationFailures())) {
+ ArenaAllocator allocator(Runtime::Current()->GetArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
std::unique_ptr<CodeGenerator> codegen;
{
ScopedObjectAccess soa(Thread::Current());
@@ -1097,7 +1121,8 @@
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1114,12 +1139,16 @@
if (codegen.get() != nullptr) {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kCompiled);
- method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
+ method = Emit(&allocator, &code_allocator, codegen.get(), compiler_driver, code_item);
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file.PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file.PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
@@ -1194,8 +1223,9 @@
const uint32_t access_flags = method->GetAccessFlags();
const InvokeType invoke_type = method->GetInvokeType();
- ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
- CodeVectorAllocator code_allocator(&arena);
+ ArenaAllocator allocator(Runtime::Current()->GetJitArenaPool());
+ ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+ CodeVectorAllocator code_allocator(&allocator);
VariableSizedHandleScope handles(self);
std::unique_ptr<CodeGenerator> codegen;
@@ -1203,7 +1233,8 @@
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
codegen.reset(
- TryCompile(&arena,
+ TryCompile(&allocator,
+ &arena_stack,
&code_allocator,
code_item,
access_flags,
@@ -1221,9 +1252,13 @@
}
if (kArenaAllocatorCountAllocations) {
- if (arena.BytesAllocated() > kArenaAllocatorMemoryReportThreshold) {
- MemStats mem_stats(arena.GetMemStats());
- LOG(INFO) << dex_file->PrettyMethod(method_idx) << " " << Dumpable<MemStats>(mem_stats);
+ size_t total_allocated = allocator.BytesAllocated() + arena_stack.PeakBytesAllocated();
+ if (total_allocated > kArenaAllocatorMemoryReportThreshold) {
+ MemStats mem_stats(allocator.GetMemStats());
+ MemStats peak_stats(arena_stack.GetPeakStats());
+ LOG(INFO) << dex_file->PrettyMethod(method_idx)
+ << "\n" << Dumpable<MemStats>(mem_stats)
+ << "\n" << Dumpable<MemStats>(peak_stats);
}
}
}
@@ -1315,7 +1350,7 @@
CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
}
- Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
+ Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
if (jit_logger != nullptr) {
jit_logger->WriteLog(code, code_allocator.GetSize(), method);
}
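The recurring pattern in this file is pairing the long-lived ArenaAllocator with an ArenaStack, whose ScopedArenaAllocator scopes release memory on exit while a high-water mark stays visible for the memory report. A toy model of that stack discipline, assuming a simple bump cursor captures the essentials (these are not ART's real implementations):

  #include <algorithm>
  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Toy stack arena with a high-water mark (sketch only).
  class ToyArenaStack {
   public:
    ToyArenaStack() : storage_(1 << 16), top_(0), peak_(0) {}
    size_t Mark() const { return top_; }
    void Release(size_t mark) { top_ = mark; }
    void* Alloc(size_t n) {
      n = (n + 7u) & ~size_t{7};            // 8-byte alignment
      assert(top_ + n <= storage_.size());  // no growth in this sketch
      void* p = storage_.data() + top_;
      top_ += n;
      peak_ = std::max(peak_, top_);
      return p;
    }
    size_t PeakBytesAllocated() const { return peak_; }
   private:
    std::vector<uint8_t> storage_;
    size_t top_;
    size_t peak_;
  };

  // RAII scope: everything allocated inside is reclaimed on scope exit.
  class ToyScopedAllocator {
   public:
    explicit ToyScopedAllocator(ToyArenaStack* stack) : stack_(stack), mark_(stack->Mark()) {}
    ~ToyScopedAllocator() { stack_->Release(mark_); }
    void* Alloc(size_t n) { return stack_->Alloc(n); }
   private:
    ToyArenaStack* const stack_;
    const size_t mark_;
  };

  int main() {
    ToyArenaStack arena_stack;
    {
      ToyScopedAllocator local_allocator(&arena_stack);  // e.g. liveness + register allocation
      local_allocator.Alloc(4096);
    }  // memory rolled back here, ready for the next pass
    assert(arena_stack.Mark() == 0);
    assert(arena_stack.PeakBytesAllocated() == 4096);    // still visible for the memory report
    return 0;
  }

This is why the memory report above sums allocator.BytesAllocated() with arena_stack.PeakBytesAllocated(): the stack's live size at the end is usually zero, so only its peak is meaningful.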
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 33f1a4a..5632f9a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
+#include "base/scoped_arena_allocator.h"
#include "builder.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -47,7 +48,7 @@
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
int reg = -1,
HInstruction* defined_by = nullptr) {
LiveInterval* interval =
@@ -78,30 +79,69 @@
}
}
-inline HGraph* CreateGraph(ArenaAllocator* allocator) {
- return new (allocator) HGraph(
- allocator,
- *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))),
+class ArenaPoolAndAllocator {
+ public:
+ ArenaPoolAndAllocator()
+ : pool_(), allocator_(&pool_), arena_stack_(&pool_), scoped_allocator_(&arena_stack_) { }
+
+ ArenaAllocator* GetAllocator() { return &allocator_; }
+ ArenaStack* GetArenaStack() { return &arena_stack_; }
+ ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; }
+
+ private:
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ ArenaStack arena_stack_;
+ ScopedArenaAllocator scoped_allocator_;
+};
+
+inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
+ return new (pool_and_allocator->GetAllocator()) HGraph(
+ pool_and_allocator->GetAllocator(),
+ pool_and_allocator->GetArenaStack(),
+ *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))),
/*method_idx*/-1,
kRuntimeISA);
}
-// Create a control-flow graph from Dex instructions.
-inline HGraph* CreateCFG(ArenaAllocator* allocator,
- const uint16_t* data,
- DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* item =
- reinterpret_cast<const DexFile::CodeItem*>(data);
- HGraph* graph = CreateGraph(allocator);
+class OptimizingUnitTest : public CommonCompilerTest {
+ protected:
+ OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
- {
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope handles(soa.Self());
- HGraphBuilder builder(graph, *item, &handles, return_type);
- bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
- return graph_built ? graph : nullptr;
+ ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
+ ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
+ ScopedArenaAllocator* GetScopedAllocator() { return pool_and_allocator_->GetScopedAllocator(); }
+
+ void ResetPoolAndAllocator() {
+ pool_and_allocator_.reset(new ArenaPoolAndAllocator());
+ handles_.reset(); // When getting rid of the old HGraph, we can also reset handles_.
}
-}
+
+ HGraph* CreateGraph() {
+ return art::CreateGraph(pool_and_allocator_.get());
+ }
+
+ // Create a control-flow graph from Dex instructions.
+ HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
+ const DexFile::CodeItem* item =
+ reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = CreateGraph();
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (handles_ == nullptr) {
+ handles_.reset(new VariableSizedHandleScope(soa.Self()));
+ }
+ HGraphBuilder builder(graph, *item, handles_.get(), return_type);
+ bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
+ return graph_built ? graph : nullptr;
+ }
+ }
+
+ private:
+ std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
+ std::unique_ptr<VariableSizedHandleScope> handles_;
+};
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
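OptimizingUnitTest moves per-test pools, allocators, and the handle scope behind fixture accessors, and ResetPoolAndAllocator() lets a single test start over with fresh arenas. A hedged gtest sketch of the same shape, with a trivial stand-in bundle instead of ArenaPoolAndAllocator:

  #include <gtest/gtest.h>
  #include <memory>

  // Stand-in for ArenaPoolAndAllocator: any bundle of per-test resources.
  struct ResourceBundle {
    int graphs_built = 0;
  };

  class OptimizingStyleTest : public ::testing::Test {
   protected:
    OptimizingStyleTest() : resources_(new ResourceBundle()) {}

    // Mirrors ResetPoolAndAllocator(): drop all arena-backed state at once.
    void ResetResources() { resources_.reset(new ResourceBundle()); }

    ResourceBundle* resources() { return resources_.get(); }

   private:
    std::unique_ptr<ResourceBundle> resources_;
  };

  // Link against gtest_main; each TEST_F body sees a fresh fixture.
  TEST_F(OptimizingStyleTest, ResetGivesFreshState) {
    resources()->graphs_built = 3;
    ResetResources();
    EXPECT_EQ(0, resources()->graphs_built);
  }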
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index e569b78..9d53585 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -52,7 +52,7 @@
}
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
- base_ = new (GetGraph()->GetArena()) HMipsComputeBaseMethodAddress();
+ base_ = new (GetGraph()->GetAllocator()) HMipsComputeBaseMethodAddress();
HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
entry_block->InsertInstructionBefore(base_, entry_block->GetFirstInstruction());
DCHECK(base_ != nullptr);
@@ -112,7 +112,7 @@
InitializePCRelativeBasePointer();
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch(
+ HMipsPackedSwitch* mips_switch = new (graph->GetAllocator()) HMipsPackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a114e78..f92f4b2 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -137,7 +137,7 @@
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(neg);
HGraph* graph = GetGraph();
HBasicBlock* block = neg->GetBlock();
- HX86FPNeg* x86_fp_neg = new (graph->GetArena()) HX86FPNeg(
+ HX86FPNeg* x86_fp_neg = new (graph->GetAllocator()) HX86FPNeg(
neg->GetType(),
neg->InputAt(0),
method_address,
@@ -156,7 +156,7 @@
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(switch_insn);
HGraph* graph = GetGraph();
HBasicBlock* block = switch_insn->GetBlock();
- HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ HX86PackedSwitch* x86_switch = new (graph->GetAllocator()) HX86PackedSwitch(
switch_insn->GetStartValue(),
switch_insn->GetNumEntries(),
switch_insn->InputAt(0),
@@ -176,7 +176,7 @@
// Insert the base at the start of the entry block, move it to a better
// position later in MoveBaseIfNeeded().
HX86ComputeBaseMethodAddress* method_address =
- new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress();
+ new (GetGraph()->GetAllocator()) HX86ComputeBaseMethodAddress();
if (has_irreducible_loops) {
cursor->GetBlock()->InsertInstructionBefore(method_address, cursor);
} else {
@@ -190,7 +190,7 @@
void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(insn);
HX86LoadFromConstantTable* load_constant =
- new (GetGraph()->GetArena()) HX86LoadFromConstantTable(method_address, value);
+ new (GetGraph()->GetAllocator()) HX86LoadFromConstantTable(method_address, value);
if (!materialize) {
load_constant->MarkEmittedAtUseSite();
}
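All of these call sites use the arena placement-new idiom: the class exposes an operator new taking the allocator, and objects are never deleted individually because the arena is reclaimed wholesale. A self-contained sketch with a toy bump allocator standing in for ART's ArenaAllocator:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Toy bump allocator (sketch only, no reuse or growth).
  class BumpAllocator {
   public:
    explicit BumpAllocator(size_t capacity) : buffer_(capacity), used_(0) {}
    void* Alloc(size_t n) {
      n = (n + 7u) & ~size_t{7};             // 8-byte alignment
      assert(used_ + n <= buffer_.size());
      void* p = buffer_.data() + used_;
      used_ += n;
      return p;
    }
   private:
    std::vector<uint8_t> buffer_;
    size_t used_;
  };

  // The class-level operator new enables `new (allocator) Node(...)`.
  struct Node {
    explicit Node(int v) : value(v) {}
    static void* operator new(size_t size, BumpAllocator* allocator) {
      return allocator->Alloc(size);
    }
    // Matching placement delete, used only if the constructor throws.
    static void operator delete(void* /*ptr*/, BumpAllocator* /*allocator*/) {}
    int value;
  };

  int main() {
    BumpAllocator allocator(1024);
    Node* node = new (&allocator) Node(42);  // never individually deleted:
    return node->value == 42 ? 0 : 1;        // the arena is reclaimed wholesale
  }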
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index b52de36..fe98aa9 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -56,12 +56,12 @@
// Add a fake environment for String.charAt() inline info as we want
// the exception to appear as being thrown from there.
ArtMethod* char_at_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- ArenaAllocator* arena = GetGraph()->GetArena();
- HEnvironment* environment = new (arena) HEnvironment(arena,
- /* number_of_vregs */ 0u,
- char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
- check);
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ HEnvironment* environment = new (allocator) HEnvironment(allocator,
+ /* number_of_vregs */ 0u,
+ char_at_method,
+ /* dex_pc */ dex::kDexNoIndex,
+ check);
check->InsertRawEnvironment(environment);
}
}
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 14d2360..4aec6d3 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -27,17 +27,18 @@
namespace art {
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class PrettyPrinterTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
+
+void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
ASSERT_STREQ(expected, printer.str().c_str());
}
-class PrettyPrinterTest : public CommonCompilerTest {};
-
TEST_F(PrettyPrinterTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
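The pretty-printer change is a small fixture refactor: the file-static TestCode helper becomes a method of PrettyPrinterTest so it can reach fixture state such as CreateCFG(). In miniature, with hypothetical names and gtest assumed:

  #include <gtest/gtest.h>
  #include <string>

  class PrinterTest : public ::testing::Test {
   protected:
    // Previously a file-static function; as a member it can use fixture
    // helpers (in ART: CreateCFG() and the fixture's allocators).
    void TestCode(const std::string& input, const std::string& expected) {
      ASSERT_EQ(expected, Render(input));
    }

   private:
    static std::string Render(const std::string& s) { return "[" + s + "]"; }  // stand-in printer
  };

  TEST_F(PrinterTest, RendersBrackets) {
    TestCode("return-void", "[return-void]");
  }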
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f5064c3..6d9ebc8 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -122,7 +122,7 @@
class_loader_(class_loader),
hint_dex_cache_(hint_dex_cache),
handle_cache_(handles),
- worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocReferenceTypePropagation)),
is_first_run_(is_first_run) {
}
@@ -235,7 +235,7 @@
: start_block->GetFirstInstruction();
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
- bound_type = new (receiver->GetBlock()->GetGraph()->GetArena()) HBoundType(receiver);
+ bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index cb2af91..028b6d3b7 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -28,22 +28,20 @@
* Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the
* functionality of methods and situations that are hard to set up with checker tests.
*/
-class ReferenceTypePropagationTest : public CommonCompilerTest {
+class ReferenceTypePropagationTest : public OptimizingUnitTest {
public:
- ReferenceTypePropagationTest() : pool_(), allocator_(&pool_), propagation_(nullptr) {
- graph_ = CreateGraph(&allocator_);
- }
+ ReferenceTypePropagationTest() : graph_(CreateGraph()), propagation_(nullptr) { }
~ReferenceTypePropagationTest() { }
void SetupPropagation(VariableSizedHandleScope* handles) {
graph_->InitializeInexactObjectRTI(handles);
- propagation_ = new (&allocator_) ReferenceTypePropagation(graph_,
- Handle<mirror::ClassLoader>(),
- Handle<mirror::DexCache>(),
- handles,
- true,
- "test_prop");
+ propagation_ = new (GetAllocator()) ReferenceTypePropagation(graph_,
+ Handle<mirror::ClassLoader>(),
+ Handle<mirror::DexCache>(),
+ handles,
+ true,
+ "test_prop");
}
// Relay method to merge type in reference type propagation.
@@ -68,8 +66,6 @@
}
// General building fields.
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
ReferenceTypePropagation* propagation_;
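Note the constructor rewrite above: graph_(CreateGraph()) invokes a helper inherited from OptimizingUnitTest inside the member-initializer list. That is safe because base-class subobjects are fully constructed before a derived class's member initializers run, as this stand-alone sketch (illustrative names only) shows:

  #include <memory>

  struct Arena { int id = 7; };

  class Base {
   protected:
    Base() : arena_(new Arena()) {}
    Arena* arena() { return arena_.get(); }
   private:
    std::unique_ptr<Arena> arena_;
  };

  // Safe: Base (and its arena_) is fully constructed before Derived's
  // member initializers run, so arena() may be called in the init list.
  class Derived : public Base {
   public:
    Derived() : cached_id_(arena()->id) {}
    int cached_id_;
  };

  int main() { return Derived().cached_id_ == 7 ? 0 : 1; }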
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index f0057c3..5ed9e02 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -22,10 +22,9 @@
namespace art {
-RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
+RegisterAllocationResolver::RegisterAllocationResolver(CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
- : allocator_(allocator),
+ : allocator_(codegen->GetGraph()->GetAllocator()),
codegen_(codegen),
liveness_(liveness) {}
@@ -36,7 +35,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals) {
+ ArrayRef<LiveInterval* const> temp_intervals) {
size_t spill_slots = int_spill_slots
+ long_spill_slots
+ float_spill_slots
@@ -112,6 +111,7 @@
case DataType::Type::kReference:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kBool:
case DataType::Type::kInt16:
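Resolve() now takes ArrayRef<LiveInterval* const> instead of const ArenaVector<LiveInterval*>&, decoupling the interface from one concrete container. A minimal view type in the same spirit (ART's ArrayRef, or C++20 std::span, is the real thing; this toy is only for illustration):

  #include <cstddef>
  #include <vector>

  // Non-owning view over contiguous storage.
  template <typename T>
  class ToyArrayRef {
   public:
    ToyArrayRef() : data_(nullptr), size_(0) {}
    template <typename Container>
    explicit ToyArrayRef(const Container& c) : data_(c.data()), size_(c.size()) {}
    const T* begin() const { return data_; }
    const T* end() const { return data_ + size_; }
    size_t size() const { return size_; }
   private:
    const T* data_;
    size_t size_;
  };

  // Callers may pass ArenaVector, ScopedArenaVector, std::vector, ...
  int Sum(ToyArrayRef<int> values) {
    int total = 0;
    for (int v : values) total += v;
    return total;
  }

  int main() {
    std::vector<int> v = {1, 2, 3};
    return Sum(ToyArrayRef<int>(v)) == 6 ? 0 : 1;
  }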
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index 4a148e0..2783717 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -17,7 +17,6 @@
#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
-#include "base/arena_containers.h"
#include "base/array_ref.h"
#include "base/value_object.h"
#include "data_type.h"
@@ -40,9 +39,7 @@
*/
class RegisterAllocationResolver : ValueObject {
public:
- RegisterAllocationResolver(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& liveness);
+ RegisterAllocationResolver(CodeGenerator* codegen, const SsaLivenessAnalysis& liveness);
void Resolve(ArrayRef<HInstruction* const> safepoints,
size_t reserved_out_slots, // Includes slot(s) for the art method.
@@ -51,7 +48,7 @@
size_t float_spill_slots,
size_t double_spill_slots,
size_t catch_phi_spill_slots,
- const ArenaVector<LiveInterval*>& temp_intervals);
+ ArrayRef<LiveInterval* const> temp_intervals);
private:
// Update live registers of safepoint location summary.
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index c3b33e2..ece9904 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -19,6 +19,8 @@
#include <iostream>
#include <sstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "register_allocator_graph_color.h"
@@ -27,22 +29,24 @@
namespace art {
-RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
+RegisterAllocator::RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: allocator_(allocator),
codegen_(codegen),
liveness_(liveness) {}
-RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy) {
+std::unique_ptr<RegisterAllocator> RegisterAllocator::Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy) {
switch (strategy) {
case kRegisterAllocatorLinearScan:
- return new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorLinearScan(allocator, codegen, analysis));
case kRegisterAllocatorGraphColor:
- return new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis);
+ return std::unique_ptr<RegisterAllocator>(
+ new (allocator) RegisterAllocatorGraphColor(allocator, codegen, analysis));
default:
LOG(FATAL) << "Invalid register allocation strategy: " << strategy;
UNREACHABLE();
@@ -87,18 +91,18 @@
DISALLOW_COPY_AND_ASSIGN(AllRangesIterator);
};
-bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+bool RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure) {
size_t number_of_registers = processing_core_registers
? codegen.GetNumberOfCoreRegisters()
: codegen.GetNumberOfFloatingPointRegisters();
- ArenaVector<ArenaBitVector*> liveness_of_values(
- allocator->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(codegen.GetGraph()->GetArenaStack());
+ ScopedArenaVector<ArenaBitVector*> liveness_of_values(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
size_t max_end = 0u;
@@ -112,7 +116,8 @@
// allocated will populate the associated bit vector based on its live ranges.
for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
liveness_of_values.push_back(
- ArenaBitVector::Create(allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+ ArenaBitVector::Create(&allocator, max_end, false, kArenaAllocRegisterAllocatorValidate));
+ liveness_of_values.back()->ClearAllBits();
}
for (LiveInterval* start_interval : intervals) {
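Create() now hands back std::unique_ptr<RegisterAllocator> even though the object lives on an arena. In ART the DeletableArenaObject base (see the header diff below) supplies the machinery; the sketch here assumes the key ingredient is a no-op operator delete, so that `delete` runs the destructor but leaves the memory to the arena:

  #include <cstddef>
  #include <cstdint>
  #include <memory>
  #include <vector>

  class Arena {
   public:
    void* Alloc(size_t n) {
      chunks_.emplace_back(n);          // freed wholesale when the arena dies
      return chunks_.back().data();
    }
   private:
    std::vector<std::vector<uint8_t>> chunks_;
  };

  // Arena-resident base: `delete p` runs the destructor, then the no-op
  // operator delete leaves the memory to the arena (cf. DeletableArenaObject).
  class DeletableOnArena {
   public:
    static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
    static void operator delete(void* /*ptr*/) {}                    // nop: arena owns memory
    static void operator delete(void* /*ptr*/, Arena* /*arena*/) {}  // if the ctor throws
    virtual ~DeletableOnArena() = default;
  };

  struct Widget : DeletableOnArena {
    int value = 5;
  };

  std::unique_ptr<Widget> MakeWidget(Arena* arena) {
    return std::unique_ptr<Widget>(new (arena) Widget());  // destructor runs, memory stays put
  }

  int main() {
    Arena arena;                                 // outlives the unique_ptr below
    std::unique_ptr<Widget> w = MakeWidget(&arena);
    return w->value == 5 ? 0 : 1;
  }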
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 4375d68..eaeec3b 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/arena_object.h"
#include "base/macros.h"
@@ -36,7 +36,7 @@
/**
* Base class for any register allocator.
*/
-class RegisterAllocator : public ArenaObject<kArenaAllocRegisterAllocator> {
+class RegisterAllocator : public DeletableArenaObject<kArenaAllocRegisterAllocator> {
public:
enum Strategy {
kRegisterAllocatorLinearScan,
@@ -45,10 +45,10 @@
static constexpr Strategy kRegisterAllocatorDefault = kRegisterAllocatorLinearScan;
- static RegisterAllocator* Create(ArenaAllocator* allocator,
- CodeGenerator* codegen,
- const SsaLivenessAnalysis& analysis,
- Strategy strategy = kRegisterAllocatorDefault);
+ static std::unique_ptr<RegisterAllocator> Create(ScopedArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& analysis,
+ Strategy strategy = kRegisterAllocatorDefault);
virtual ~RegisterAllocator() = default;
@@ -64,18 +64,17 @@
InstructionSet instruction_set);
// Verifies that live intervals do not conflict. Used by unit testing.
- static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
+ static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
size_t number_of_spill_slots,
size_t number_of_out_slots,
const CodeGenerator& codegen,
- ArenaAllocator* allocator,
bool processing_core_registers,
bool log_fatal_on_failure);
static constexpr const char* kRegisterAllocatorPassName = "register";
protected:
- RegisterAllocator(ArenaAllocator* allocator,
+ RegisterAllocator(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
@@ -88,7 +87,7 @@
// to find an optimal split position.
LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
};
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index 4ff7315..ad5248e 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -217,13 +217,12 @@
// and thus whether it is safe to prune it from the interference graph early on.
class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
public:
- InterferenceNode(ArenaAllocator* allocator,
- LiveInterval* interval,
+ InterferenceNode(LiveInterval* interval,
const SsaLivenessAnalysis& liveness)
: stage(NodeStage::kInitial),
interval_(interval),
- adjacent_nodes_(allocator->Adapter(kArenaAllocRegisterAllocator)),
- coalesce_opportunities_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_(nullptr),
+ coalesce_opportunities_(nullptr),
out_degree_(interval->HasRegister() ? std::numeric_limits<size_t>::max() : 0),
alias_(this),
spill_weight_(ComputeSpillWeight(interval, liveness)),
@@ -232,21 +231,26 @@
DCHECK(!interval->IsHighInterval()) << "Pair nodes should be represented by the low interval";
}
- void AddInterference(InterferenceNode* other, bool guaranteed_not_interfering_yet) {
+ void AddInterference(InterferenceNode* other,
+ bool guaranteed_not_interfering_yet,
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>>* storage) {
DCHECK(!IsPrecolored()) << "To save memory, fixed nodes should not have outgoing interferences";
DCHECK_NE(this, other) << "Should not create self loops in the interference graph";
DCHECK_EQ(this, alias_) << "Should not add interferences to a node that aliases another";
DCHECK_NE(stage, NodeStage::kPruned);
DCHECK_NE(other->stage, NodeStage::kPruned);
+ if (adjacent_nodes_ == nullptr) {
+ ScopedArenaVector<InterferenceNode*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ adjacent_nodes_ = &storage->back();
+ }
if (guaranteed_not_interfering_yet) {
- DCHECK(std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other)
- == adjacent_nodes_.end());
- adjacent_nodes_.push_back(other);
+ DCHECK(!ContainsElement(GetAdjacentNodes(), other));
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
} else {
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it == adjacent_nodes_.end()) {
- adjacent_nodes_.push_back(other);
+ if (!ContainsElement(GetAdjacentNodes(), other)) {
+ adjacent_nodes_->push_back(other);
out_degree_ += EdgeWeightWith(other);
}
}
@@ -255,26 +259,29 @@
void RemoveInterference(InterferenceNode* other) {
DCHECK_EQ(this, alias_) << "Should not remove interferences from a coalesced node";
DCHECK_EQ(other->stage, NodeStage::kPruned) << "Should only remove interferences when pruning";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- if (it != adjacent_nodes_.end()) {
- adjacent_nodes_.erase(it);
- out_degree_ -= EdgeWeightWith(other);
+ if (adjacent_nodes_ != nullptr) {
+ auto it = std::find(adjacent_nodes_->begin(), adjacent_nodes_->end(), other);
+ if (it != adjacent_nodes_->end()) {
+ adjacent_nodes_->erase(it);
+ out_degree_ -= EdgeWeightWith(other);
+ }
}
}
bool ContainsInterference(InterferenceNode* other) const {
DCHECK(!IsPrecolored()) << "Should not query fixed nodes for interferences";
DCHECK_EQ(this, alias_) << "Should not query a coalesced node for interferences";
- auto it = std::find(adjacent_nodes_.begin(), adjacent_nodes_.end(), other);
- return it != adjacent_nodes_.end();
+ return ContainsElement(GetAdjacentNodes(), other);
}
LiveInterval* GetInterval() const {
return interval_;
}
- const ArenaVector<InterferenceNode*>& GetAdjacentNodes() const {
- return adjacent_nodes_;
+ ArrayRef<InterferenceNode*> GetAdjacentNodes() const {
+ return adjacent_nodes_ != nullptr
+ ? ArrayRef<InterferenceNode*>(*adjacent_nodes_)
+ : ArrayRef<InterferenceNode*>();
}
size_t GetOutDegree() const {
@@ -283,16 +290,22 @@
return out_degree_;
}
- void AddCoalesceOpportunity(CoalesceOpportunity* opportunity) {
- coalesce_opportunities_.push_back(opportunity);
+ void AddCoalesceOpportunity(CoalesceOpportunity* opportunity,
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>>* storage) {
+ if (coalesce_opportunities_ == nullptr) {
+ ScopedArenaVector<CoalesceOpportunity*>::allocator_type adapter(storage->get_allocator());
+ storage->emplace_back(adapter);
+ coalesce_opportunities_ = &storage->back();
+ }
+ coalesce_opportunities_->push_back(opportunity);
}
void ClearCoalesceOpportunities() {
- coalesce_opportunities_.clear();
+ coalesce_opportunities_ = nullptr;
}
bool IsMoveRelated() const {
- for (CoalesceOpportunity* opportunity : coalesce_opportunities_) {
+ for (CoalesceOpportunity* opportunity : GetCoalesceOpportunities()) {
if (opportunity->stage == CoalesceStage::kWorklist ||
opportunity->stage == CoalesceStage::kActive) {
return true;
@@ -325,8 +338,10 @@
return alias_;
}
- const ArenaVector<CoalesceOpportunity*>& GetCoalesceOpportunities() const {
- return coalesce_opportunities_;
+ ArrayRef<CoalesceOpportunity*> GetCoalesceOpportunities() const {
+ return coalesce_opportunities_ != nullptr
+ ? ArrayRef<CoalesceOpportunity*>(*coalesce_opportunities_)
+ : ArrayRef<CoalesceOpportunity*>();
}
float GetSpillWeight() const {
@@ -361,10 +376,10 @@
// All nodes interfering with this one.
// We use an unsorted vector as a set, since a tree or hash set is too heavy for the
// set sizes that we encounter. Using a vector leads to much better performance.
- ArenaVector<InterferenceNode*> adjacent_nodes_;
+ ScopedArenaVector<InterferenceNode*>* adjacent_nodes_; // Owned by ColoringIteration.
// Interference nodes that this node should be coalesced with to reduce moves.
- ArenaVector<CoalesceOpportunity*> coalesce_opportunities_;
+ ScopedArenaVector<CoalesceOpportunity*>* coalesce_opportunities_; // Owned by ColoringIteration.
// The maximum number of colors with which this node could interfere. This could be more than
// the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
@@ -416,7 +431,7 @@
class ColoringIteration {
public:
ColoringIteration(RegisterAllocatorGraphColor* register_allocator,
- ArenaAllocator* allocator,
+ ScopedArenaAllocator* allocator,
bool processing_core_regs,
size_t num_regs)
: register_allocator_(register_allocator),
@@ -430,15 +445,17 @@
freeze_worklist_(allocator->Adapter(kArenaAllocRegisterAllocator)),
spill_worklist_(HasGreaterNodePriority, allocator->Adapter(kArenaAllocRegisterAllocator)),
coalesce_worklist_(CoalesceOpportunity::CmpPriority,
- allocator->Adapter(kArenaAllocRegisterAllocator)) {}
+ allocator->Adapter(kArenaAllocRegisterAllocator)),
+ adjacent_nodes_links_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+ coalesce_opportunities_links_(allocator->Adapter(kArenaAllocRegisterAllocator)) {}
// Use the intervals collected from instructions to construct an
// interference graph mapping intervals to adjacency lists.
// Also, collect synthesized safepoint nodes, used to keep
// track of live intervals across safepoints.
// TODO: Should build safepoints elsewhere.
- void BuildInterferenceGraph(const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes);
+ void BuildInterferenceGraph(const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes);
// Add coalesce opportunities to interference nodes.
void FindCoalesceOpportunities();
@@ -456,8 +473,8 @@
// Return prunable nodes.
// The register allocator will need to access prunable nodes after coloring
// in order to tell the code generator which registers have been assigned.
- const ArenaVector<InterferenceNode*>& GetPrunableNodes() const {
- return prunable_nodes_;
+ ArrayRef<InterferenceNode* const> GetPrunableNodes() const {
+ return ArrayRef<InterferenceNode* const>(prunable_nodes_);
}
private:
@@ -503,38 +520,46 @@
// needed to split intervals and assign spill slots.
RegisterAllocatorGraphColor* register_allocator_;
- // An arena allocator used for a single graph coloring attempt.
- ArenaAllocator* allocator_;
+ // A scoped arena allocator used for a single graph coloring attempt.
+ ScopedArenaAllocator* allocator_;
const bool processing_core_regs_;
const size_t num_regs_;
// A map from live intervals to interference nodes.
- ArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
+ ScopedArenaHashMap<LiveInterval*, InterferenceNode*> interval_node_map_;
// Uncolored nodes that should be pruned from the interference graph.
- ArenaVector<InterferenceNode*> prunable_nodes_;
+ ScopedArenaVector<InterferenceNode*> prunable_nodes_;
// A stack of nodes pruned from the interference graph, waiting to be colored.
- ArenaStdStack<InterferenceNode*> pruned_nodes_;
+ ScopedArenaStdStack<InterferenceNode*> pruned_nodes_;
// A queue containing low degree, non-move-related nodes that can be pruned immediately.
- ArenaDeque<InterferenceNode*> simplify_worklist_;
+ ScopedArenaDeque<InterferenceNode*> simplify_worklist_;
// A queue containing low degree, move-related nodes.
- ArenaDeque<InterferenceNode*> freeze_worklist_;
+ ScopedArenaDeque<InterferenceNode*> freeze_worklist_;
// A queue containing high degree nodes.
// If we have to prune from the spill worklist, we cannot guarantee
// the pruned node a color, so we order the worklist by priority.
- ArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
+ ScopedArenaPriorityQueue<InterferenceNode*, decltype(&HasGreaterNodePriority)> spill_worklist_;
// A queue containing coalesce opportunities.
// We order the coalesce worklist by priority, since some coalesce opportunities (e.g., those
// inside of loops) are more important than others.
- ArenaPriorityQueue<CoalesceOpportunity*,
- decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+ ScopedArenaPriorityQueue<CoalesceOpportunity*,
+ decltype(&CoalesceOpportunity::CmpPriority)> coalesce_worklist_;
+
+ // Storage for links to adjacent nodes for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<InterferenceNode*>> adjacent_nodes_links_;
+
+ // Storage for links to coalesce opportunities for interference nodes.
+ // Using std::deque so that elements do not move when adding new ones.
+ ScopedArenaDeque<ScopedArenaVector<CoalesceOpportunity*>> coalesce_opportunities_links_;
DISALLOW_COPY_AND_ASSIGN(ColoringIteration);
};
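The new adjacent_nodes_links_ and coalesce_opportunities_links_ members implement a lazy-storage trick: each node holds a raw pointer to a vector that lives inside a shared std::deque, relying on the guarantee that deque growth at the ends never relocates existing elements. A compact stand-alone demonstration (illustrative names):

  #include <cassert>
  #include <deque>
  #include <vector>

  struct GraphNode {
    std::vector<GraphNode*>* adjacent = nullptr;  // owned by the shared deque

    void AddEdge(GraphNode* other, std::deque<std::vector<GraphNode*>>* storage) {
      if (adjacent == nullptr) {
        storage->emplace_back();   // growing a deque never moves earlier elements
        adjacent = &storage->back();
      }
      adjacent->push_back(other);
    }
  };

  int main() {
    std::deque<std::vector<GraphNode*>> storage;
    GraphNode a, b, c;
    a.AddEdge(&b, &storage);
    b.AddEdge(&c, &storage);       // new deque element; a.adjacent stays valid
    a.AddEdge(&c, &storage);
    assert(a.adjacent->size() == 2);
    return 0;
  }

Nodes without edges pay only a null pointer instead of an empty vector, which matters because most interference nodes in practice have few or no outgoing interferences.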
@@ -547,7 +572,7 @@
return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
}
-RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness,
bool iterative_move_coalescing)
@@ -574,8 +599,7 @@
physical_core_nodes_.resize(codegen_->GetNumberOfCoreRegisters(), nullptr);
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kInt32);
- physical_core_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_core_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_core_nodes_[i]->stage = NodeStage::kPrecolored;
core_intervals_.push_back(interval);
if (codegen_->IsBlockedCoreRegister(i)) {
@@ -587,8 +611,7 @@
for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
LiveInterval* interval =
LiveInterval::MakeFixedInterval(allocator_, i, DataType::Type::kFloat32);
- physical_fp_nodes_[i] =
- new (allocator_) InterferenceNode(allocator_, interval, liveness);
+ physical_fp_nodes_[i] = new (allocator_) InterferenceNode(interval, liveness);
physical_fp_nodes_[i]->stage = NodeStage::kPrecolored;
fp_intervals_.push_back(interval);
if (codegen_->IsBlockedFloatingPointRegister(i)) {
@@ -597,12 +620,14 @@
}
}
+RegisterAllocatorGraphColor::~RegisterAllocatorGraphColor() {}
+
void RegisterAllocatorGraphColor::AllocateRegisters() {
// (1) Collect and prepare live intervals.
ProcessInstructions();
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*>& intervals = processing_core_regs
+ ScopedArenaVector<LiveInterval*>& intervals = processing_core_regs
? core_intervals_
: fp_intervals_;
size_t num_registers = processing_core_regs
@@ -619,17 +644,15 @@
<< "should be prioritized over long ones, because they cannot be split further.)";
// Many data structures are cleared between graph coloring attempts, so we reduce
- // total memory usage by using a new arena allocator for each attempt.
- ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
+ // total memory usage by using a new scoped arena allocator for each attempt.
+ ScopedArenaAllocator coloring_attempt_allocator(allocator_->GetArenaStack());
ColoringIteration iteration(this,
&coloring_attempt_allocator,
processing_core_regs,
num_registers);
- // (2) Build the interference graph. Also gather safepoints.
- ArenaVector<InterferenceNode*> safepoints(
- coloring_attempt_allocator.Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ // (2) Build the interference graph.
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
iteration.BuildInterferenceGraph(intervals, physical_nodes);
@@ -691,7 +714,7 @@
} // for processing_core_instructions
// (6) Resolve locations and deconstruct SSA form.
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_art_method_slots_ + reserved_out_slots_,
num_int_spill_slots_,
@@ -699,7 +722,7 @@
num_float_spill_slots_,
num_double_spill_slots_,
catch_phi_spill_slot_counter_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
Validate(/*log_fatal_on_failure*/ true);
@@ -708,8 +731,9 @@
bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
for (bool processing_core_regs : {true, false}) {
- ArenaVector<LiveInterval*> intervals(
- allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
LiveInterval* interval = instruction->GetLiveInterval();
@@ -718,7 +742,7 @@
}
}
- ArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
+ ScopedArenaVector<InterferenceNode*>& physical_nodes = processing_core_regs
? physical_core_nodes_
: physical_fp_nodes_;
for (InterferenceNode* fixed : physical_nodes) {
@@ -742,11 +766,10 @@
+ num_float_spill_slots_
+ num_double_spill_slots_
+ catch_phi_spill_slot_counter_;
- bool ok = ValidateIntervals(intervals,
+ bool ok = ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
spill_slots,
reserved_art_method_slots_ + reserved_out_slots_,
*codegen_,
- allocator_,
processing_core_regs,
log_fatal_on_failure);
if (!ok) {
@@ -825,7 +848,7 @@
CheckForFixedOutput(instruction);
AllocateSpillSlotForCatchPhi(instruction);
- ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
+ ScopedArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
? core_intervals_
: fp_intervals_;
if (interval->HasSpillSlot() || instruction->IsConstant()) {
@@ -1075,11 +1098,12 @@
} else if (to->IsPrecolored()) {
// It is important that only a single node represents a given fixed register in the
// interference graph. We retrieve that node here.
- const ArenaVector<InterferenceNode*>& physical_nodes = to->GetInterval()->IsFloatingPoint()
- ? register_allocator_->physical_fp_nodes_
- : register_allocator_->physical_core_nodes_;
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes =
+ to->GetInterval()->IsFloatingPoint() ? register_allocator_->physical_fp_nodes_
+ : register_allocator_->physical_core_nodes_;
InterferenceNode* physical_node = physical_nodes[to->GetInterval()->GetRegister()];
- from->AddInterference(physical_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ physical_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
DCHECK_EQ(to->GetInterval()->GetRegister(), physical_node->GetInterval()->GetRegister());
DCHECK_EQ(to->GetAlias(), physical_node) << "Fixed nodes should alias the canonical fixed node";
@@ -1097,11 +1121,12 @@
physical_nodes[to->GetInterval()->GetHighInterval()->GetRegister()];
DCHECK_EQ(to->GetInterval()->GetHighInterval()->GetRegister(),
high_node->GetInterval()->GetRegister());
- from->AddInterference(high_node, /*guaranteed_not_interfering_yet*/ false);
+ from->AddInterference(
+ high_node, /*guaranteed_not_interfering_yet*/ false, &adjacent_nodes_links_);
}
} else {
// Standard interference between two uncolored nodes.
- from->AddInterference(to, guaranteed_not_interfering_yet);
+ from->AddInterference(to, guaranteed_not_interfering_yet, &adjacent_nodes_links_);
}
if (both_directions) {
@@ -1156,8 +1181,8 @@
}
void ColoringIteration::BuildInterferenceGraph(
- const ArenaVector<LiveInterval*>& intervals,
- const ArenaVector<InterferenceNode*>& physical_nodes) {
+ const ScopedArenaVector<LiveInterval*>& intervals,
+ const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
@@ -1171,7 +1196,7 @@
//
// For simplicity, we create a tuple for each endpoint, and then sort the tuples.
// Tuple contents: (position, is_range_beginning, node).
- ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
+ ScopedArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
allocator_->Adapter(kArenaAllocRegisterAllocator));
// We reserve plenty of space to avoid excessive copying.
@@ -1181,8 +1206,8 @@
for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
LiveRange* range = sibling->GetFirstRange();
if (range != nullptr) {
- InterferenceNode* node = new (allocator_) InterferenceNode(
- allocator_, sibling, register_allocator_->liveness_);
+ InterferenceNode* node =
+ new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
interval_node_map_.Insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
@@ -1217,8 +1242,7 @@
});
// Nodes live at the current position in the linear sweep.
- ArenaVector<InterferenceNode*> live(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaVector<InterferenceNode*> live(allocator_->Adapter(kArenaAllocRegisterAllocator));
// Linear sweep. When we encounter the beginning of a range, we add the corresponding node to the
// live set. When we encounter the end of a range, we remove the corresponding node
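For illustration, the endpoint sweep sketched in the comments above can be written with standard containers as follows; this is a simplified sketch, assuming range ends (is_begin == false) sort before range begins at equal positions so that touching half-open ranges do not interfere, and AddInterference is a stand-in for the node method used by the patch:

  #include <algorithm>
  #include <cstddef>
  #include <tuple>
  #include <vector>

  struct Node {};                          // stand-in for InterferenceNode
  void AddInterference(Node* a, Node* b);  // stand-in for InterferenceNode::AddInterference

  void SweepEndpoints(std::vector<std::tuple<size_t, bool, Node*>>* endpoints) {
    std::sort(endpoints->begin(), endpoints->end());  // false (end) sorts before true (begin)
    std::vector<Node*> live;
    for (const auto& [position, is_begin, node] : *endpoints) {
      if (is_begin) {
        for (Node* other : live) {
          AddInterference(node, other);  // the new range overlaps every currently live range
        }
        live.push_back(node);
      } else {
        live.erase(std::find(live.begin(), live.end(), node));  // range ended; no longer live
      }
    }
  }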
@@ -1261,8 +1285,8 @@
<< "Nodes of different memory widths should never be coalesced";
CoalesceOpportunity* opportunity =
new (allocator_) CoalesceOpportunity(a, b, kind, position, register_allocator_->liveness_);
- a->AddCoalesceOpportunity(opportunity);
- b->AddCoalesceOpportunity(opportunity);
+ a->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
+ b->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
coalesce_worklist_.push(opportunity);
}
@@ -1332,7 +1356,7 @@
// Coalesce phi inputs with the corresponding output.
HInstruction* defined_by = interval->GetDefinedBy();
if (defined_by != nullptr && defined_by->IsPhi()) {
- const ArenaVector<HBasicBlock*>& predecessors = defined_by->GetBlock()->GetPredecessors();
+ ArrayRef<HBasicBlock* const> predecessors(defined_by->GetBlock()->GetPredecessors());
HInputsRef inputs = defined_by->GetInputs();
for (size_t i = 0, e = inputs.size(); i < e; ++i) {
@@ -1675,7 +1699,7 @@
// Add coalesce opportunities.
for (CoalesceOpportunity* opportunity : from->GetCoalesceOpportunities()) {
if (opportunity->stage != CoalesceStage::kDefunct) {
- into->AddCoalesceOpportunity(opportunity);
+ into->AddCoalesceOpportunity(opportunity, &coalesce_opportunities_links_);
}
}
EnableCoalesceOpportunities(from);
@@ -1729,7 +1753,7 @@
// Build a mask with a bit set for each register assigned to some
// interval in `intervals`.
template <typename Container>
-static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
+static std::bitset<kMaxNumRegs> BuildConflictMask(const Container& intervals) {
std::bitset<kMaxNumRegs> conflict_mask;
for (InterferenceNode* adjacent : intervals) {
LiveInterval* conflicting = adjacent->GetInterval();
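The hunk above quotes only the signature change plus the first lines of the helper; for readability, here is a hedged reconstruction of the whole function as it plausibly reads after the change (everything past the quoted lines is inferred, not copied from the patch):

  template <typename Container>
  static std::bitset<kMaxNumRegs> BuildConflictMask(const Container& intervals) {
    std::bitset<kMaxNumRegs> conflict_mask;
    for (InterferenceNode* adjacent : intervals) {
      LiveInterval* conflicting = adjacent->GetInterval();
      if (conflicting->HasRegister()) {
        conflict_mask.set(conflicting->GetRegister());  // mark the assigned register
        if (conflicting->HasHighInterval()) {
          // Wide values occupy a register pair; mark the high half as well.
          conflict_mask.set(conflicting->GetHighInterval()->GetRegister());
        }
      }
    }
    return conflict_mask;
  }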
@@ -1765,7 +1789,7 @@
bool ColoringIteration::ColorInterferenceGraph() {
DCHECK_LE(num_regs_, kMaxNumRegs) << "kMaxNumRegs is too small";
- ArenaVector<LiveInterval*> colored_intervals(
+ ScopedArenaVector<LiveInterval*> colored_intervals(
allocator_->Adapter(kArenaAllocRegisterAllocator));
bool successful = true;
@@ -1888,16 +1912,18 @@
return successful;
}
-void RegisterAllocatorGraphColor::AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes) {
+void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes) {
// The register allocation resolver will organize the stack based on value type,
// so we assign stack slots for each value type separately.
- ArenaVector<LiveInterval*> double_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> long_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> float_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
- ArenaVector<LiveInterval*> int_intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaAllocatorAdapter<void> adapter = allocator.Adapter(kArenaAllocRegisterAllocator);
+ ScopedArenaVector<LiveInterval*> double_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> long_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> float_intervals(adapter);
+ ScopedArenaVector<LiveInterval*> int_intervals(adapter);
// The set of parent intervals already handled.
- ArenaSet<LiveInterval*> seen(allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaSet<LiveInterval*> seen(adapter);
// Find nodes that need spill slots.
for (InterferenceNode* node : nodes) {
@@ -1940,6 +1966,7 @@
case DataType::Type::kReference:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kBool:
case DataType::Type::kInt16:
@@ -1953,23 +1980,24 @@
}
// Color spill slots for each value type.
- ColorSpillSlots(&double_intervals, &num_double_spill_slots_);
- ColorSpillSlots(&long_intervals, &num_long_spill_slots_);
- ColorSpillSlots(&float_intervals, &num_float_spill_slots_);
- ColorSpillSlots(&int_intervals, &num_int_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(double_intervals), &num_double_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(long_intervals), &num_long_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(float_intervals), &num_float_spill_slots_);
+ ColorSpillSlots(ArrayRef<LiveInterval* const>(int_intervals), &num_int_spill_slots_);
}
-void RegisterAllocatorGraphColor::ColorSpillSlots(ArenaVector<LiveInterval*>* intervals,
- size_t* num_stack_slots_used) {
+void RegisterAllocatorGraphColor::ColorSpillSlots(ArrayRef<LiveInterval* const> intervals,
+ /* out */ size_t* num_stack_slots_used) {
// We cannot use the original interference graph here because spill slots are assigned to
// all of the siblings of an interval, whereas an interference node represents only a single
// sibling. So, we assign spill slots linear-scan-style by sorting all the interval endpoints
// by position, and assigning the lowest spill slot available when we encounter an interval
// beginning. We ignore lifetime holes for simplicity.
- ArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
- allocator_->Adapter(kArenaAllocRegisterAllocator));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<std::tuple<size_t, bool, LiveInterval*>> interval_endpoints(
+ allocator.Adapter(kArenaAllocRegisterAllocator));
- for (LiveInterval* parent_interval : *intervals) {
+ for (LiveInterval* parent_interval : intervals) {
DCHECK(parent_interval->IsParent());
DCHECK(!parent_interval->HasSpillSlot());
size_t start = parent_interval->GetStart();
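A hypothetical sketch of the lowest-available-slot sweep described in the comment above, with std::vector<bool> (from <vector>) standing in for the ArenaBitVector used later in this hunk; the real code additionally accounts for interval width when counting slots:

  std::vector<bool> taken;  // taken[i] is true while spill slot i is in use
  for (const auto& [position, is_begin, interval] : interval_endpoints) {  // sorted by position
    if (is_begin) {
      size_t slot = 0;
      while (slot < taken.size() && taken[slot]) {
        ++slot;  // scan upward for the lowest free slot
      }
      if (slot == taken.size()) {
        taken.push_back(true);  // no free slot; allocate a new one
      } else {
        taken[slot] = true;
      }
      interval->SetSpillSlot(slot);
    } else {
      taken[interval->GetSpillSlot()] = false;  // interval ended; slot becomes reusable
    }
  }
  *num_stack_slots_used = taken.size();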
@@ -1989,7 +2017,7 @@
< std::tie(std::get<0>(rhs), std::get<1>(rhs));
});
- ArenaBitVector taken(allocator_, 0, true);
+ ArenaBitVector taken(&allocator, 0, true, kArenaAllocRegisterAllocator);
for (auto it = interval_endpoints.begin(), end = interval_endpoints.end(); it != end; ++it) {
// Extract information from the current tuple.
LiveInterval* parent_interval;
diff --git a/compiler/optimizing/register_allocator_graph_color.h b/compiler/optimizing/register_allocator_graph_color.h
index 3f6d674..3072c92 100644
--- a/compiler/optimizing/register_allocator_graph_color.h
+++ b/compiler/optimizing/register_allocator_graph_color.h
@@ -18,9 +18,10 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_GRAPH_COLOR_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "register_allocator.h"
namespace art {
@@ -85,11 +86,11 @@
*/
class RegisterAllocatorGraphColor : public RegisterAllocator {
public:
- RegisterAllocatorGraphColor(ArenaAllocator* allocator,
+ RegisterAllocatorGraphColor(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis,
bool iterative_move_coalescing = true);
- ~RegisterAllocatorGraphColor() OVERRIDE {}
+ ~RegisterAllocatorGraphColor() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -141,11 +142,10 @@
// Assigns stack slots to a list of intervals, ensuring that interfering intervals are not
// assigned the same stack slot.
- void ColorSpillSlots(ArenaVector<LiveInterval*>* nodes,
- size_t* num_stack_slots_used);
+ void ColorSpillSlots(ArrayRef<LiveInterval* const> nodes, /* out */ size_t* num_stack_slots_used);
// Provide stack slots to nodes that need them.
- void AllocateSpillSlots(const ArenaVector<InterferenceNode*>& nodes);
+ void AllocateSpillSlots(ArrayRef<InterferenceNode* const> nodes);
// Whether iterative move coalescing should be performed. Iterative move coalescing
// improves code quality, but increases compile time.
@@ -154,19 +154,19 @@
// Live intervals, split by kind (core and floating point).
// These should not contain high intervals, as those are represented by
// the corresponding low interval throughout register allocation.
- ArenaVector<LiveInterval*> core_intervals_;
- ArenaVector<LiveInterval*> fp_intervals_;
+ ScopedArenaVector<LiveInterval*> core_intervals_;
+ ScopedArenaVector<LiveInterval*> fp_intervals_;
// Intervals for temporaries, saved for special handling in the resolution phase.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// Safepoints, saved for special handling while processing instructions.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// Interference nodes representing specific registers. These are "pre-colored" nodes
// in the interference graph.
- ArenaVector<InterferenceNode*> physical_core_nodes_;
- ArenaVector<InterferenceNode*> physical_fp_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_core_nodes_;
+ ScopedArenaVector<InterferenceNode*> physical_fp_nodes_;
// Allocated stack slot counters.
size_t num_int_spill_slots_;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 2012cd5..cfe63bd 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -40,7 +40,7 @@
return GetHighForLowRegister(low->GetRegister()) != low->GetHighInterval()->GetRegister();
}
-RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+RegisterAllocatorLinearScan::RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& liveness)
: RegisterAllocator(allocator, codegen, liveness),
@@ -81,6 +81,8 @@
reserved_out_slots_ = ptr_size / kVRegSize + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
+RegisterAllocatorLinearScan::~RegisterAllocatorLinearScan() {}
+
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
if (interval == nullptr) return false;
bool is_core_register = (interval->GetType() != DataType::Type::kFloat64)
@@ -90,7 +92,7 @@
void RegisterAllocatorLinearScan::AllocateRegisters() {
AllocateRegistersInternal();
- RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ RegisterAllocationResolver(codegen_, liveness_)
.Resolve(ArrayRef<HInstruction* const>(safepoints_),
reserved_out_slots_,
int_spill_slots_.size(),
@@ -98,7 +100,7 @@
float_spill_slots_.size(),
double_spill_slots_.size(),
catch_phi_spill_slots_,
- temp_intervals_);
+ ArrayRef<LiveInterval* const>(temp_intervals_));
if (kIsDebugBuild) {
processing_core_registers_ = true;
@@ -298,7 +300,7 @@
LiveInterval* current = instruction->GetLiveInterval();
if (current == nullptr) return;
- ArenaVector<LiveInterval*>& unhandled = core_register
+ ScopedArenaVector<LiveInterval*>& unhandled = core_register
? unhandled_core_intervals_
: unhandled_fp_intervals_;
@@ -425,7 +427,9 @@
bool RegisterAllocatorLinearScan::ValidateInternal(bool log_fatal_on_failure) const {
// To simplify unit testing, we eagerly create the array of intervals, and
// call the helper method.
- ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
+ ScopedArenaAllocator allocator(allocator_->GetArenaStack());
+ ScopedArenaVector<LiveInterval*> intervals(
+ allocator.Adapter(kArenaAllocRegisterAllocatorValidate));
for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
@@ -433,7 +437,7 @@
}
}
- const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
+ const ScopedArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
? &physical_core_register_intervals_
: &physical_fp_register_intervals_;
for (LiveInterval* fixed : *physical_register_intervals) {
@@ -448,8 +452,12 @@
}
}
- return ValidateIntervals(intervals, GetNumberOfSpillSlots(), reserved_out_slots_, *codegen_,
- allocator_, processing_core_registers_, log_fatal_on_failure);
+ return ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ GetNumberOfSpillSlots(),
+ reserved_out_slots_,
+ *codegen_,
+ processing_core_registers_,
+ log_fatal_on_failure);
}
void RegisterAllocatorLinearScan::DumpInterval(std::ostream& stream, LiveInterval* interval) const {
@@ -813,7 +821,7 @@
// Remove interval and its other half if any. Return iterator to the following element.
-static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
+static ScopedArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
- ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) {
+ ScopedArenaVector<LiveInterval*>* intervals, ScopedArenaVector<LiveInterval*>::iterator pos) {
DCHECK(intervals->begin() <= pos && pos < intervals->end());
LiveInterval* interval = *pos;
if (interval->IsLowInterval()) {
@@ -1044,7 +1052,8 @@
}
}
-void RegisterAllocatorLinearScan::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
+void RegisterAllocatorLinearScan::AddSorted(ScopedArenaVector<LiveInterval*>* array,
+ LiveInterval* interval) {
DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
size_t insert_at = 0;
for (size_t i = array->size(); i > 0; --i) {
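The loop above preserves the invariant documented for the unhandled lists in the linear-scan header later in this patch: the vector is ordered by decreasing start position, so back() is always the interval with the lowest start. An equivalent sketch using std::lower_bound (ignoring the low/high-interval tie-breaking the real implementation performs):

  static void AddSortedSketch(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval) {
    auto by_descending_start = [](LiveInterval* a, LiveInterval* b) {
      return a->GetStart() > b->GetStart();  // descending by start position
    };
    auto it = std::lower_bound(array->begin(), array->end(), interval, by_descending_start);
    array->insert(it, interval);  // back() remains the interval with the lowest start
  }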
@@ -1102,7 +1111,7 @@
return;
}
- ArenaVector<size_t>* spill_slots = nullptr;
+ ScopedArenaVector<size_t>* spill_slots = nullptr;
switch (interval->GetType()) {
case DataType::Type::kFloat64:
spill_slots = &double_spill_slots_;
@@ -1116,6 +1125,7 @@
case DataType::Type::kReference:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kBool:
case DataType::Type::kInt16:
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 9c650a4..36788b7 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_LINEAR_SCAN_H_
#include "arch/instruction_set.h"
-#include "base/arena_containers.h"
+#include "base/scoped_arena_containers.h"
#include "base/macros.h"
#include "register_allocator.h"
@@ -39,10 +39,10 @@
*/
class RegisterAllocatorLinearScan : public RegisterAllocator {
public:
- RegisterAllocatorLinearScan(ArenaAllocator* allocator,
+ RegisterAllocatorLinearScan(ScopedArenaAllocator* allocator,
CodeGenerator* codegen,
const SsaLivenessAnalysis& analysis);
- ~RegisterAllocatorLinearScan() OVERRIDE {}
+ ~RegisterAllocatorLinearScan() OVERRIDE;
void AllocateRegisters() OVERRIDE;
@@ -70,7 +70,7 @@
bool AllocateBlockedReg(LiveInterval* interval);
// Add `interval` in the given sorted list.
- static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
+ static void AddSorted(ScopedArenaVector<LiveInterval*>* array, LiveInterval* interval);
// Returns whether `reg` is blocked by the code generator.
bool IsBlocked(int reg) const;
@@ -107,43 +107,43 @@
// List of intervals for core registers that must be processed, ordered by start
// position. Last entry is the interval that has the lowest start position.
// This list is initially populated before doing the linear scan.
- ArenaVector<LiveInterval*> unhandled_core_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_core_intervals_;
// List of intervals for floating-point registers. Same comments as above.
- ArenaVector<LiveInterval*> unhandled_fp_intervals_;
+ ScopedArenaVector<LiveInterval*> unhandled_fp_intervals_;
// Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
// or `unhandled_fp_intervals_`.
- ArenaVector<LiveInterval*>* unhandled_;
+ ScopedArenaVector<LiveInterval*>* unhandled_;
// List of intervals that have been processed.
- ArenaVector<LiveInterval*> handled_;
+ ScopedArenaVector<LiveInterval*> handled_;
// List of intervals that are currently active when processing a new live interval.
// That is, they have a live range that spans the start of the new interval.
- ArenaVector<LiveInterval*> active_;
+ ScopedArenaVector<LiveInterval*> active_;
// List of intervals that are currently inactive when processing a new live interval.
// That is, they have a lifetime hole that spans the start of the new interval.
- ArenaVector<LiveInterval*> inactive_;
+ ScopedArenaVector<LiveInterval*> inactive_;
// Fixed intervals for physical registers. Such intervals cover the positions
// where an instruction requires a specific register.
- ArenaVector<LiveInterval*> physical_core_register_intervals_;
- ArenaVector<LiveInterval*> physical_fp_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_core_register_intervals_;
+ ScopedArenaVector<LiveInterval*> physical_fp_register_intervals_;
// Intervals for temporaries. Such intervals cover the positions
// where an instruction requires a temporary.
- ArenaVector<LiveInterval*> temp_intervals_;
+ ScopedArenaVector<LiveInterval*> temp_intervals_;
// The spill slots allocated for live intervals. We ensure spill slots
// are typed to avoid (1) doing moves and swaps between two different kinds
// of registers, and (2) swapping between a single stack slot and a double
// stack slot. This simplifies the parallel move resolver.
- ArenaVector<size_t> int_spill_slots_;
- ArenaVector<size_t> long_spill_slots_;
- ArenaVector<size_t> float_spill_slots_;
- ArenaVector<size_t> double_spill_slots_;
+ ScopedArenaVector<size_t> int_spill_slots_;
+ ScopedArenaVector<size_t> long_spill_slots_;
+ ScopedArenaVector<size_t> float_spill_slots_;
+ ScopedArenaVector<size_t> double_spill_slots_;
// Spill slots allocated to catch phis. This category is special-cased because
// (1) slots are allocated prior to linear scan and in reverse linear order,
@@ -151,7 +151,7 @@
size_t catch_phi_spill_slots_;
// Instructions that need a safepoint.
- ArenaVector<HInstruction*> safepoints_;
+ ScopedArenaVector<HInstruction*> safepoints_;
// True if processing core registers. False if processing floating
// point registers.
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 59987e2..69ed8c7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -38,12 +38,36 @@
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
-class RegisterAllocatorTest : public CommonCompilerTest {
+class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
// These functions need to access private variables of LocationSummary, so we declare them
// as members of RegisterAllocatorTest, which we make a friend class.
- static void SameAsFirstInputHint(Strategy strategy);
- static void ExpectedInRegisterHint(Strategy strategy);
+ void SameAsFirstInputHint(Strategy strategy);
+ void ExpectedInRegisterHint(Strategy strategy);
+
+ // Helper functions that make use of the OptimizingUnitTest's members.
+ bool Check(const uint16_t* data, Strategy strategy);
+ void CFG1(Strategy strategy);
+ void Loop1(Strategy strategy);
+ void Loop2(Strategy strategy);
+ void Loop3(Strategy strategy);
+ void DeadPhi(Strategy strategy);
+ HGraph* BuildIfElseWithPhi(HPhi** phi, HInstruction** input1, HInstruction** input2);
+ void PhiHint(Strategy strategy);
+ HGraph* BuildFieldReturn(HInstruction** field, HInstruction** ret);
+ HGraph* BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub);
+ HGraph* BuildDiv(HInstruction** div);
+ void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy);
+
+ bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
+ const CodeGenerator& codegen) {
+ return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
+ /* number_of_spill_slots */ 0u,
+ /* number_of_out_slots */ 0u,
+ codegen,
+ /* processing_core_registers */ true,
+ /* log_fatal_on_failure */ false);
+ }
};
// This macro should include all register allocation strategies that should be tested.
@@ -55,17 +79,15 @@
test_name(Strategy::kRegisterAllocatorGraphColor);\
}
-static bool Check(const uint16_t* data, Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) {
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
return register_allocator->Validate(false);
}
@@ -75,95 +97,82 @@
* tests are based on this validation method.
*/
TEST_F(RegisterAllocatorTest, ValidateIntervals) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ HGraph* graph = CreateGraph();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
// Test with two intervals of the same range.
{
static constexpr size_t ranges[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
- intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 0));
+ intervals.push_back(BuildInterval(ranges, arraysize(ranges), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with two non-intersecting intervals, one of which has a lifetime hole.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 43}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with intersecting intervals.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
intervals.clear();
}
// Test with siblings.
{
static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
- intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+ intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), 0));
intervals[0]->SplitAt(43);
static constexpr size_t ranges2[][2] = {{42, 47}};
- intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), 1));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[1]->SetRegister(0);
// Sibling of the first interval has no register allocated to it.
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
intervals[0]->GetNextSibling()->SetRegister(0);
- ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_FALSE(ValidateIntervals(intervals, codegen));
}
}
-static void CFG1(Strategy strategy) {
+void RegisterAllocatorTest::CFG1(Strategy strategy) {
/*
* Test the following snippet:
* return 0;
@@ -185,7 +194,7 @@
TEST_ALL_STRATEGIES(CFG1);
-static void Loop1(Strategy strategy) {
+void RegisterAllocatorTest::Loop1(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -226,7 +235,7 @@
TEST_ALL_STRATEGIES(Loop1);
-static void Loop2(Strategy strategy) {
+void RegisterAllocatorTest::Loop2(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0;
@@ -277,7 +286,7 @@
TEST_ALL_STRATEGIES(Loop2);
-static void Loop3(Strategy strategy) {
+void RegisterAllocatorTest::Loop3(Strategy strategy) {
/*
* Test the following snippet:
* int a = 0
@@ -314,16 +323,14 @@
Instruction::MOVE | 1 << 12 | 0 << 8,
Instruction::GOTO | 0xF900);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
@@ -351,13 +358,11 @@
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8 | 1,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
HXor* first_xor = graph->GetBlocks()[1]->GetFirstInstruction()->AsXor();
@@ -383,7 +388,7 @@
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-static void DeadPhi(Strategy strategy) {
+void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -405,17 +410,15 @@
Instruction::GOTO | 0xFD00,
Instruction::RETURN_VOID);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_TRUE(register_allocator->Validate(false));
}
@@ -433,16 +436,14 @@
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+ HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
@@ -461,20 +462,21 @@
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ LiveInterval* interval =
+ LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(40, 50);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(20, 30);
register_allocator.inactive_.push_back(interval);
- interval = LiveInterval::MakeFixedInterval(&allocator, 0, DataType::Type::kInt32);
+ interval = LiveInterval::MakeFixedInterval(GetScopedAllocator(), 0, DataType::Type::kInt32);
interval->AddRange(60, 70);
register_allocator.inactive_.push_back(interval);
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
@@ -487,36 +489,35 @@
ASSERT_EQ(20u, register_allocator.unhandled_->front()->GetStart());
}
-static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
- HPhi** phi,
- HInstruction** input1,
- HInstruction** input2) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildIfElseWithPhi(HPhi** phi,
+ HInstruction** input1,
+ HInstruction** input2) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- HInstruction* test = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kBool,
- MemberOffset(22),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ HInstruction* test = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kBool,
+ MemberOffset(22),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(test);
- block->AddInstruction(new (allocator) HIf(test));
- HBasicBlock* then = new (allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (allocator) HBasicBlock(graph);
- HBasicBlock* join = new (allocator) HBasicBlock(graph);
+ block->AddInstruction(new (GetAllocator()) HIf(test));
+ HBasicBlock* then = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* else_ = new (GetAllocator()) HBasicBlock(graph);
+ HBasicBlock* join = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(then);
graph->AddBlock(else_);
graph->AddBlock(join);
@@ -525,32 +526,32 @@
block->AddSuccessor(else_);
then->AddSuccessor(join);
else_->AddSuccessor(join);
- then->AddInstruction(new (allocator) HGoto());
- else_->AddInstruction(new (allocator) HGoto());
+ then->AddInstruction(new (GetAllocator()) HGoto());
+ else_->AddInstruction(new (GetAllocator()) HGoto());
- *phi = new (allocator) HPhi(allocator, 0, 0, DataType::Type::kInt32);
+ *phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
join->AddPhi(*phi);
- *input1 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
- *input2 = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *input1 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
+ *input2 = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
- join->AddInstruction(new (allocator) HExit());
+ join->AddInstruction(new (GetAllocator()) HExit());
(*phi)->AddInput(*input1);
(*phi)->AddInput(*input2);
@@ -559,23 +560,21 @@
return graph;
}
-static void PhiHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::PhiHint(Strategy strategy) {
HPhi *phi;
HInstruction *input1, *input2;
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the register allocator is deterministic.
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 0);
@@ -584,18 +583,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set the phi to a specific register, and check that the inputs get allocated
// the same register.
phi->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -604,18 +603,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input1 to a specific register, and check that the phi and other input get allocated
// the same register.
input1->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -624,18 +623,18 @@
}
{
- HGraph* graph = BuildIfElseWithPhi(&allocator, &phi, &input1, &input2);
+ HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Set input2 to a specific register, and check that the phi and other input get allocated
// the same register.
input2->GetLocations()->UpdateOut(Location::RegisterLocation(2));
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(input1->GetLiveInterval()->GetRegister(), 2);
@@ -650,58 +649,54 @@
PhiHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
- HInstruction** field,
- HInstruction** ret) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildFieldReturn(HInstruction** field, HInstruction** ret) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
entry->AddInstruction(parameter);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *field = new (allocator) HInstanceFieldGet(parameter,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(42),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph->GetDexFile(),
- 0);
+ *field = new (GetAllocator()) HInstanceFieldGet(parameter,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(42),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph->GetDexFile(),
+ 0);
block->AddInstruction(*field);
- *ret = new (allocator) HReturn(*field);
+ *ret = new (GetAllocator()) HReturn(*field);
block->AddInstruction(*ret);
- HBasicBlock* exit = new (allocator) HBasicBlock(graph);
+ HBasicBlock* exit = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (allocator) HExit());
+ exit->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
@@ -709,19 +704,19 @@
}
{
- HGraph* graph = BuildFieldReturn(&allocator, &field, &ret);
+ HGraph* graph = BuildFieldReturn(&field, &ret);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
// Don't use SetInAt because we are overriding an already allocated location.
ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 2);
@@ -734,50 +729,46 @@
ExpectedInRegisterHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
- HInstruction** first_sub,
- HInstruction** second_sub) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildTwoSubs(HInstruction** first_sub, HInstruction** second_sub) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(
+ HInstruction* parameter = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
HInstruction* constant2 = graph->GetIntConstant(2);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *first_sub = new (allocator) HSub(DataType::Type::kInt32, parameter, constant1);
+ *first_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, parameter, constant1);
block->AddInstruction(*first_sub);
- *second_sub = new (allocator) HSub(DataType::Type::kInt32, *first_sub, constant2);
+ *second_sub = new (GetAllocator()) HSub(DataType::Type::kInt32, *first_sub, constant2);
block->AddInstruction(*second_sub);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// Sanity check that in normal conditions, the registers are the same.
@@ -786,11 +777,11 @@
}
{
- HGraph* graph = BuildTwoSubs(&allocator, &first_sub, &second_sub);
+ HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
// Check that both subs get the same register.
@@ -799,8 +790,8 @@
ASSERT_EQ(first_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_sub->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 2);
@@ -814,53 +805,47 @@
SameAsFirstInputHint(Strategy::kRegisterAllocatorLinearScan);
}
-static HGraph* BuildDiv(ArenaAllocator* allocator,
- HInstruction** div) {
- HGraph* graph = CreateGraph(allocator);
- HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+HGraph* RegisterAllocatorTest::BuildDiv(HInstruction** div) {
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(
+ HInstruction* first = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* second = new (allocator) HParameterValue(
+ HInstruction* second = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(first);
entry->AddInstruction(second);
- HBasicBlock* block = new (allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div =
- new (allocator) HDiv(DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
+ *div = new (GetAllocator()) HDiv(
+ DataType::Type::kInt32, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
- block->AddInstruction(new (allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph->BuildDominatorTree();
return graph;
}
-static void ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
+void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
HInstruction *div;
+ HGraph* graph = BuildDiv(&div);
+ std::unique_ptr<const X86InstructionSetFeatures> features_x86(
+ X86InstructionSetFeatures::FromCppDefines());
+ x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ liveness.Analyze();
- {
- HGraph* graph = BuildDiv(&allocator, &div);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
- liveness.Analyze();
+ std::unique_ptr<RegisterAllocator> register_allocator =
+ RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
+ register_allocator->AllocateRegisters();
- RegisterAllocator* register_allocator =
- RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
- register_allocator->AllocateRegisters();
-
- // div on x86 requires its first input in eax and the output be the same as the first input.
- ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
- }
+ // div on x86 requires its first input in eax and the output be the same as the first input.
+ ASSERT_EQ(div->GetLiveInterval()->GetRegister(), 0);
}
// TODO: Enable this test for graph coloring register allocation when iterative move
@@ -874,59 +859,57 @@
// position.
// This test only applies to the linear scan allocator.
TEST_F(RegisterAllocatorTest, SpillInactive) {
- ArenaPool pool;
-
// Create a synthesized graph to please the register_allocator and
// ssa_liveness_analysis code.
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ HGraph* graph = CreateGraph();
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(
+ HInstruction* one = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* two = new (&allocator) HParameterValue(
+ HInstruction* two = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* three = new (&allocator) HParameterValue(
+ HInstruction* three = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
- HInstruction* four = new (&allocator) HParameterValue(
+ HInstruction* four = new (GetAllocator()) HParameterValue(
graph->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);
entry->AddInstruction(four);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
// We create a synthesized user requesting a register, to avoid just spilling the
// intervals.
- HPhi* user = new (&allocator) HPhi(&allocator, 0, 1, DataType::Type::kInt32);
+ HPhi* user = new (GetAllocator()) HPhi(GetAllocator(), 0, 1, DataType::Type::kInt32);
user->AddInput(one);
user->SetBlock(block);
- LocationSummary* locations = new (&allocator) LocationSummary(user, LocationSummary::kNoCall);
+ LocationSummary* locations = new (GetAllocator()) LocationSummary(user, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
static constexpr size_t phi_ranges[][2] = {{20, 30}};
- BuildInterval(phi_ranges, arraysize(phi_ranges), &allocator, -1, user);
+ BuildInterval(phi_ranges, arraysize(phi_ranges), GetScopedAllocator(), -1, user);
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
- LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one);
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 7));
- first->uses_.push_front(*new(&allocator) UsePosition(user, false, 6));
+ LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
- locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
+ locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
first = first->SplitAt(1);
// Create an interval that conflicts with the next interval, to force the next
// interval to call `AllocateBlockedReg`.
static constexpr size_t ranges2[][2] = {{2, 4}};
- LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), &allocator, -1, two);
- locations = new (&allocator) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* second = BuildInterval(ranges2, arraysize(ranges2), GetScopedAllocator(), -1, two);
+ locations =
+ new (GetAllocator()) LocationSummary(second->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
// Create an interval that will lead to splitting the first interval. The bug occurred
@@ -935,31 +918,32 @@
// "[0, 2(, [4, 6(" in the list of handled intervals, even though we haven't processed intervals
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
- LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three);
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 8));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 4));
- third->uses_.push_front(*new(&allocator) UsePosition(user, false, 3));
- locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+ locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
// Because the first part of the split interval was considered handled, this interval
// was free to allocate the same register, even though it conflicts with it.
static constexpr size_t ranges4[][2] = {{4, 6}};
- LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), &allocator, -1, four);
- locations = new (&allocator) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
+ LiveInterval* fourth = BuildInterval(ranges4, arraysize(ranges4), GetScopedAllocator(), -1, four);
+ locations =
+ new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen);
+ SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
// Populate the instructions in the liveness object, to please the register allocator.
for (size_t i = 0; i < 32; ++i) {
liveness.instructions_from_lifetime_position_.push_back(user);
}
- RegisterAllocatorLinearScan register_allocator(&allocator, &codegen, liveness);
+ RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
register_allocator.unhandled_core_intervals_.push_back(fourth);
register_allocator.unhandled_core_intervals_.push_back(third);
register_allocator.unhandled_core_intervals_.push_back(second);
@@ -967,19 +951,18 @@
// Set just one register available to make all intervals compete for the same.
register_allocator.number_of_registers_ = 1;
- register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+ register_allocator.registers_array_ = GetAllocator()->AllocArray<size_t>(1);
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
register_allocator.LinearScan();
// Test that there are no conflicts between intervals.
- ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+ ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
intervals.push_back(first);
intervals.push_back(second);
intervals.push_back(third);
intervals.push_back(fourth);
- ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
- intervals, 0, 0, codegen, &allocator, true, false));
+ ASSERT_TRUE(ValidateIntervals(intervals, codegen));
}
} // namespace art
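The test changes above all follow one pattern: objects that belong to the graph (blocks, instructions) keep using the graph-lifetime ArenaAllocator returned by GetAllocator(), while pass-local data (live intervals, liveness results, the register allocator itself) moves to the ScopedArenaAllocator returned by GetScopedAllocator(). A minimal sketch of that split, assuming the OptimizingUnitTest base class these tests now derive from; the accessor names come from the hunks above, but the fixture around them is illustrative:

class ArenaSplitTest : public OptimizingUnitTest {
 protected:
  void Check() {
    // Graph-lifetime allocations: must outlive the HGraph itself.
    HGraph* graph = CreateGraph();
    HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph);
    graph->AddBlock(entry);
    // Pass-local allocations: reclaimed when the scoped arena unwinds,
    // so nothing reachable from the graph may point into them afterwards.
    ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
    intervals.push_back(
        LiveInterval::MakeInterval(GetScopedAllocator(), DataType::Type::kInt32));
  }
};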
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 5212e86..c673d54 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -18,6 +18,8 @@
#include "scheduler.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "data_type-inl.h"
#include "prepare_for_register_allocation.h"
@@ -442,7 +444,7 @@
}
void SchedulingGraph::DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates) {
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates) {
// TODO(xueliang): ideally we should move scheduling information into HInstruction; after that
// we should move this dotty graph dump feature to the visualizer and add a compiler option for it.
std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
@@ -451,7 +453,7 @@
// Start the dot graph. Use an increasing index for easier differentiation.
output << "digraph G {\n";
for (const auto& entry : nodes_map_) {
- SchedulingNode* node = entry.second;
+ SchedulingNode* node = entry.second.get();
DumpAsDotNode(output, node);
}
// Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
@@ -466,7 +468,7 @@
}
SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
// Schedule condition inputs that can be materialized immediately before their use.
// In following example, after we've scheduled HSelect, we want LessThan to be scheduled
// immediately, because it is a materialized condition, and will be emitted right before HSelect
@@ -506,7 +508,7 @@
}
SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
- ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
+ ScopedArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
DCHECK(!nodes->empty());
SchedulingNode* select_node = nullptr;
@@ -562,7 +564,7 @@
}
void HScheduler::Schedule(HBasicBlock* block) {
- ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
scheduling_graph_.Clear();
@@ -593,7 +595,7 @@
}
}
- ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler));
if (kDumpDotSchedulingGraphs) {
// Remember the list of initial candidates for debug output purposes.
initial_candidates.assign(candidates_.begin(), candidates_.end());
@@ -779,7 +781,7 @@
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
// scheduling nodes, internal nodes map, dependencies, etc.
- ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
+ ScopedArenaAllocator arena_allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
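The hunk above is the core of the change: scheduler-internal structures now live in a phase-local ScopedArenaAllocator stacked on the graph's arena, so everything the pass builds is reclaimed when the pass returns instead of accumulating in the graph's ArenaAllocator. A sketch of the pattern, using a hypothetical driver function in place of the real caller:

// Hypothetical driver; the real entry point is HInstructionScheduling::Run.
void RunArm64SchedulingPass(HGraph* graph, SchedulingNodeSelector* selector) {
  // Phase-local arena: pushed onto the graph's arena stack, popped on return.
  ScopedArenaAllocator arena_allocator(graph->GetArenaStack());
  arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
  scheduler.Schedule(graph);
}  // All SchedulingNodes, node maps, and candidate vectors are freed here.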
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 66ffac5..3efd26a 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -19,6 +19,8 @@
#include <fstream>
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "base/time_utils.h"
#include "code_generator.h"
#include "driver/compiler_driver.h"
@@ -152,16 +154,16 @@
/**
* A node representing an `HInstruction` in the `SchedulingGraph`.
*/
-class SchedulingNode : public ArenaObject<kArenaAllocScheduler> {
+class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
public:
- SchedulingNode(HInstruction* instr, ArenaAllocator* arena, bool is_scheduling_barrier)
+ SchedulingNode(HInstruction* instr, ScopedArenaAllocator* allocator, bool is_scheduling_barrier)
: latency_(0),
internal_latency_(0),
critical_path_(0),
instruction_(instr),
is_scheduling_barrier_(is_scheduling_barrier),
- data_predecessors_(arena->Adapter(kArenaAllocScheduler)),
- other_predecessors_(arena->Adapter(kArenaAllocScheduler)),
+ data_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
+ other_predecessors_(allocator->Adapter(kArenaAllocScheduler)),
num_unscheduled_successors_(0) {
data_predecessors_.reserve(kPreallocatedPredecessors);
}
@@ -171,11 +173,19 @@
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetDataPredecessors() const {
+ return data_predecessors_;
+ }
+
void AddOtherPredecessor(SchedulingNode* predecessor) {
other_predecessors_.push_back(predecessor);
predecessor->num_unscheduled_successors_++;
}
+ const ScopedArenaVector<SchedulingNode*>& GetOtherPredecessors() const {
+ return other_predecessors_;
+ }
+
void DecrementNumberOfUnscheduledSuccessors() {
num_unscheduled_successors_--;
}
@@ -195,8 +205,6 @@
void SetInternalLatency(uint32_t internal_latency) { internal_latency_ = internal_latency; }
uint32_t GetCriticalPath() const { return critical_path_; }
bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }
- const ArenaVector<SchedulingNode*>& GetDataPredecessors() const { return data_predecessors_; }
- const ArenaVector<SchedulingNode*>& GetOtherPredecessors() const { return other_predecessors_; }
private:
// The latency of this node. It represents the latency between the moment the
@@ -227,8 +235,8 @@
// Predecessors in `data_predecessors_` are data dependencies. Those in
// `other_predecessors_` contain side-effect dependencies, environment
// dependencies, and scheduling barrier dependencies.
- ArenaVector<SchedulingNode*> data_predecessors_;
- ArenaVector<SchedulingNode*> other_predecessors_;
+ ScopedArenaVector<SchedulingNode*> data_predecessors_;
+ ScopedArenaVector<SchedulingNode*> other_predecessors_;
// The number of unscheduled successors for this node. This number is
// decremented as successors are scheduled. When it reaches zero this node
@@ -243,19 +251,21 @@
*/
class SchedulingGraph : public ValueObject {
public:
- SchedulingGraph(const HScheduler* scheduler, ArenaAllocator* arena)
+ SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
: scheduler_(scheduler),
- arena_(arena),
+ arena_(allocator),
contains_scheduling_barrier_(false),
nodes_map_(arena_->Adapter(kArenaAllocScheduler)),
heap_location_collector_(nullptr) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
- SchedulingNode* node = new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier);
- nodes_map_.Insert(std::make_pair(instr, node));
+ std::unique_ptr<SchedulingNode> node(
+ new (arena_) SchedulingNode(instr, arena_, is_scheduling_barrier));
+ SchedulingNode* result = node.get();
+ nodes_map_.Insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
- return node;
+ return result;
}
void Clear() {
@@ -272,7 +282,7 @@
if (it == nodes_map_.end()) {
return nullptr;
} else {
- return it->second;
+ return it->second.get();
}
}
@@ -290,7 +300,7 @@
// Dump the scheduling graph, in dot file format, appending it to the file
// `scheduling_graphs.dot`.
void DumpAsDotGraph(const std::string& description,
- const ArenaVector<SchedulingNode*>& initial_candidates);
+ const ScopedArenaVector<SchedulingNode*>& initial_candidates);
protected:
void AddDependency(SchedulingNode* node, SchedulingNode* dependency, bool is_data_dependency);
@@ -313,11 +323,11 @@
const HScheduler* const scheduler_;
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const arena_;
bool contains_scheduling_barrier_;
- ArenaHashMap<const HInstruction*, SchedulingNode*> nodes_map_;
+ ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
const HeapLocationCollector* heap_location_collector_;
};
@@ -367,11 +377,11 @@
class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
public:
- virtual SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) = 0;
virtual ~SchedulingNodeSelector() {}
protected:
- static void DeleteNodeAtIndex(ArenaVector<SchedulingNode*>* nodes, size_t index) {
+ static void DeleteNodeAtIndex(ScopedArenaVector<SchedulingNode*>* nodes, size_t index) {
(*nodes)[index] = nodes->back();
nodes->pop_back();
}
@@ -387,7 +397,7 @@
srand(seed_);
}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE {
UNUSED(graph);
DCHECK(!nodes->empty());
@@ -408,15 +418,15 @@
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
- SchedulingNode* PopHighestPriorityNode(ArenaVector<SchedulingNode*>* nodes,
+ SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE;
protected:
SchedulingNode* GetHigherPrioritySchedulingNode(SchedulingNode* candidate,
SchedulingNode* check) const;
- SchedulingNode* SelectMaterializedCondition(ArenaVector<SchedulingNode*>* nodes,
- const SchedulingGraph& graph) const;
+ SchedulingNode* SelectMaterializedCondition(ScopedArenaVector<SchedulingNode*>* nodes,
+ const SchedulingGraph& graph) const;
private:
const SchedulingNode* prev_select_;
@@ -424,16 +434,16 @@
class HScheduler {
public:
- HScheduler(ArenaAllocator* arena,
+ HScheduler(ScopedArenaAllocator* allocator,
SchedulingLatencyVisitor* latency_visitor,
SchedulingNodeSelector* selector)
- : arena_(arena),
+ : allocator_(allocator),
latency_visitor_(latency_visitor),
selector_(selector),
only_optimize_loop_blocks_(true),
- scheduling_graph_(this, arena),
+ scheduling_graph_(this, allocator),
cursor_(nullptr),
- candidates_(arena_->Adapter(kArenaAllocScheduler)) {}
+ candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
virtual ~HScheduler() {}
void Schedule(HGraph* graph);
@@ -461,7 +471,7 @@
node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
}
- ArenaAllocator* const arena_;
+ ScopedArenaAllocator* const allocator_;
SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
@@ -473,7 +483,7 @@
HInstruction* cursor_;
// The list of candidates for scheduling. A node becomes a candidate when all
// its predecessors have been scheduled.
- ArenaVector<SchedulingNode*> candidates_;
+ ScopedArenaVector<SchedulingNode*> candidates_;
private:
DISALLOW_COPY_AND_ASSIGN(HScheduler);
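One ownership subtlety in the header changes above: SchedulingNode is now a DeletableArenaObject and nodes_map_ stores std::unique_ptr values, so the map owns its nodes and runs their destructors when cleared, while AddNode hands callers only a raw, non-owning pointer. A short sketch of the resulting contract (names taken from the hunks above; the surrounding setup is assumed):

SchedulingNode* node = scheduling_graph.AddNode(instr);  // non-owning view
if (node->IsSchedulingBarrier()) {
  // Barrier nodes order everything scheduled before them.
}
scheduling_graph.Clear();  // unique_ptrs in nodes_map_ destroy every node
// `node` is dangling past this point and must not be dereferenced.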
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 110db47..b3c8f10 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -514,9 +514,10 @@
DataType::Type type = instr->InputAt(0)->GetType();
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
last_visited_internal_latency_ = 2 * kArmIntegerOpLatency;
break;
@@ -633,9 +634,10 @@
switch (type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
if (maybe_compressed_char_at) {
last_visited_internal_latency_ += kArmMemoryLoadLatency;
@@ -733,9 +735,10 @@
switch (value_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32: {
if (index->IsConstant()) {
last_visited_latency_ = kArmMemoryStoreLatency;
@@ -916,9 +919,10 @@
switch (field_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
case DataType::Type::kInt32:
last_visited_latency_ = kArmMemoryLoadLatency;
break;
@@ -977,9 +981,10 @@
switch (field_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
if (is_volatile) {
last_visited_internal_latency_ = kArmMemoryBarrierLatency + kArmMemoryStoreLatency;
last_visited_latency_ = kArmMemoryBarrierLatency;
@@ -1047,6 +1052,7 @@
DataType::Type input_type = instr->GetInputType();
switch (result_type) {
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1072,6 +1078,7 @@
case DataType::Type::kInt64:
switch (input_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1095,6 +1102,7 @@
case DataType::Type::kFloat32:
switch (input_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
@@ -1118,6 +1126,7 @@
case DataType::Type::kFloat64:
switch (input_type) {
case DataType::Type::kBool:
+ case DataType::Type::kUint8:
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index fe274d2..0cb8684 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,10 +137,10 @@
class HSchedulerARM : public HScheduler {
public:
- HSchedulerARM(ArenaAllocator* arena,
+ HSchedulerARM(ScopedArenaAllocator* allocator,
SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
- : HScheduler(arena, arm_latency_visitor, selector) {}
+ : HScheduler(allocator, arm_latency_visitor, selector) {}
~HSchedulerARM() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index e1a80ec..32f161f 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -131,8 +131,8 @@
class HSchedulerARM64 : public HScheduler {
public:
- HSchedulerARM64(ArenaAllocator* arena, SchedulingNodeSelector* selector)
- : HScheduler(arena, &arm64_latency_visitor_, selector) {}
+ HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector)
+ : HScheduler(allocator, &arm64_latency_visitor_, selector) {}
~HSchedulerARM64() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 0e6e0c5..dfc1633 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -71,16 +71,14 @@
return v;
}
-class SchedulerTest : public CommonCompilerTest {
+class SchedulerTest : public OptimizingUnitTest {
public:
- SchedulerTest() : pool_(), allocator_(&pool_) {
- graph_ = CreateGraph(&allocator_);
- }
+ SchedulerTest() : graph_(CreateGraph()) { }
// Build scheduling graph, and run target specific scheduling on it.
void TestBuildDependencyGraphAndSchedule(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* block1 = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->AddBlock(block1);
graph_->SetEntryBlock(entry);
@@ -100,23 +98,25 @@
// array_get2 ArrayGet [array, add1]
// array_set2 ArraySet [array, add1, add2]
- HInstruction* array = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
+ HInstruction* array = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c1 = graph_->GetIntConstant(1);
HInstruction* c2 = graph_->GetIntConstant(10);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, c1, c2);
- HInstruction* add2 = new (&allocator_) HAdd(DataType::Type::kInt32, add1, c2);
- HInstruction* mul = new (&allocator_) HMul(DataType::Type::kInt32, add1, add2);
- HInstruction* div_check = new (&allocator_) HDivZeroCheck(add2, 0);
- HInstruction* div = new (&allocator_) HDiv(DataType::Type::kInt32, add1, div_check, 0);
- HInstruction* array_get1 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, c1, c2);
+ HInstruction* add2 = new (GetAllocator()) HAdd(DataType::Type::kInt32, add1, c2);
+ HInstruction* mul = new (GetAllocator()) HMul(DataType::Type::kInt32, add1, add2);
+ HInstruction* div_check = new (GetAllocator()) HDivZeroCheck(add2, 0);
+ HInstruction* div = new (GetAllocator()) HDiv(DataType::Type::kInt32, add1, div_check, 0);
+ HInstruction* array_get1 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set1 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
- HInstruction* array_get2 = new (&allocator_) HArrayGet(array, add1, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ HInstruction* array_get2 =
+ new (GetAllocator()) HArrayGet(array, add1, DataType::Type::kInt32, 0);
HInstruction* array_set2 =
- new (&allocator_) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(array, add1, add2, DataType::Type::kInt32, 0);
DCHECK(div_check->CanThrow());
@@ -135,18 +135,18 @@
block1->AddInstruction(instr);
}
- HEnvironment* environment = new (&allocator_) HEnvironment(&allocator_,
- 2,
- graph_->GetArtMethod(),
- 0,
- div_check);
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(GetAllocator(),
+ 2,
+ graph_->GetArtMethod(),
+ 0,
+ div_check);
div_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, add2);
add2->AddEnvUseAt(div_check->GetEnvironment(), 0);
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -184,7 +184,7 @@
void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- HGraph* graph = CreateCFG(&allocator_, data);
+ HGraph* graph = CreateCFG(data);
// Schedule the graph randomly.
HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
@@ -198,55 +198,57 @@
}
void TestDependencyGraphOnAliasingArrayAccesses(HScheduler* scheduler) {
- HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ HBasicBlock* entry = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
graph_->BuildDominatorTree();
- HInstruction* arr = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kReference);
- HInstruction* i = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* j = new (&allocator_) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(1),
- 1,
- DataType::Type::kInt32);
- HInstruction* object = new (&allocator_) HParameterValue(graph_->GetDexFile(),
+ HInstruction* arr = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
dex::TypeIndex(0),
0,
DataType::Type::kReference);
+ HInstruction* i = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* j = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(1),
+ 1,
+ DataType::Type::kInt32);
+ HInstruction* object = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kReference);
HInstruction* c0 = graph_->GetIntConstant(0);
HInstruction* c1 = graph_->GetIntConstant(1);
- HInstruction* add0 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c0);
- HInstruction* add1 = new (&allocator_) HAdd(DataType::Type::kInt32, i, c1);
- HInstruction* sub0 = new (&allocator_) HSub(DataType::Type::kInt32, i, c0);
- HInstruction* sub1 = new (&allocator_) HSub(DataType::Type::kInt32, i, c1);
- HInstruction* arr_set_0 = new (&allocator_) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_1 = new (&allocator_) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_i = new (&allocator_) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
+ HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c0);
+ HInstruction* add1 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c1);
+ HInstruction* sub0 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c0);
+ HInstruction* sub1 = new (GetAllocator()) HSub(DataType::Type::kInt32, i, c1);
+ HInstruction* arr_set_0 =
+ new (GetAllocator()) HArraySet(arr, c0, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_1 =
+ new (GetAllocator()) HArraySet(arr, c1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_i = new (GetAllocator()) HArraySet(arr, i, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add0 =
- new (&allocator_) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_add1 =
- new (&allocator_) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, add1, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub0 =
- new (&allocator_) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
+ new (GetAllocator()) HArraySet(arr, sub0, c0, DataType::Type::kInt32, 0);
HInstruction* arr_set_sub1 =
- new (&allocator_) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
- HInstruction* arr_set_j = new (&allocator_) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
- HInstanceFieldSet* set_field10 = new (&allocator_) HInstanceFieldSet(object,
- c1,
- nullptr,
- DataType::Type::kInt32,
- MemberOffset(10),
- false,
- kUnknownFieldIndex,
- kUnknownClassDefIndex,
- graph_->GetDexFile(),
- 0);
+ new (GetAllocator()) HArraySet(arr, sub1, c0, DataType::Type::kInt32, 0);
+ HInstruction* arr_set_j = new (GetAllocator()) HArraySet(arr, j, c0, DataType::Type::kInt32, 0);
+ HInstanceFieldSet* set_field10 = new (GetAllocator()) HInstanceFieldSet(object,
+ c1,
+ nullptr,
+ DataType::Type::kInt32,
+ MemberOffset(10),
+ false,
+ kUnknownFieldIndex,
+ kUnknownClassDefIndex,
+ graph_->GetDexFile(),
+ 0);
HInstruction* block_instructions[] = {arr,
i,
@@ -270,7 +272,7 @@
entry->AddInstruction(instr);
}
- SchedulingGraph scheduling_graph(scheduler, graph_->GetArena());
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
@@ -342,21 +344,19 @@
scheduler->Schedule(graph_);
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
};
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(&allocator_, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -365,14 +365,14 @@
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&allocator_, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 827b591..0e46aec 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -135,10 +135,10 @@
DCHECK(both_successors_return || phi != nullptr);
// Create the Select instruction and insert it in front of the If.
- HSelect* select = new (graph_->GetArena()) HSelect(if_instruction->InputAt(0),
- true_value,
- false_value,
- if_instruction->GetDexPc());
+ HSelect* select = new (graph_->GetAllocator()) HSelect(if_instruction->InputAt(0),
+ true_value,
+ false_value,
+ if_instruction->GetDexPc());
if (both_successors_return) {
if (true_value->GetType() == DataType::Type::kReference) {
DCHECK(false_value->GetType() == DataType::Type::kReference);
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index fea47e6..cf00e48 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -29,9 +29,9 @@
: HOptimization(graph, pass_name),
graph_(graph),
block_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)),
loop_effects_(graph->GetBlocks().size(),
- graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
+ graph->GetAllocator()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
SideEffects GetLoopEffects(HBasicBlock* block) const;
SideEffects GetBlockEffects(HBasicBlock* block) const;
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index ac5eb15..9731712 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -21,6 +21,19 @@
namespace art {
+// Only runtime types other than void are allowed.
+static const DataType::Type kTestTypes[] = {
+ DataType::Type::kReference,
+ DataType::Type::kBool,
+ DataType::Type::kInt8,
+ DataType::Type::kUint16,
+ DataType::Type::kInt16,
+ DataType::Type::kInt32,
+ DataType::Type::kInt64,
+ DataType::Type::kFloat32,
+ DataType::Type::kFloat64,
+};
+
/**
* Tests for the SideEffects class.
*/
@@ -91,9 +104,7 @@
TEST(SideEffectsTest, DependencesAndNoDependences) {
// Apply test to each individual data type.
- for (DataType::Type type = DataType::Type::kReference;
- type < DataType::Type::kVoid;
- type = static_cast<DataType::Type>(static_cast<uint8_t>(type) + 1u)) {
+ for (DataType::Type type : kTestTypes) {
// Same data type and access type: proper write/read dep.
testWriteAndReadDependence(
SideEffects::FieldWriteOfType(type, false),
@@ -169,9 +180,7 @@
TEST(SideEffectsTest, AllWritesAndReads) {
SideEffects s = SideEffects::None();
// Keep taking the union of different writes and reads.
- for (DataType::Type type = DataType::Type::kReference;
- type < DataType::Type::kVoid;
- type = static_cast<DataType::Type>(static_cast<uint8_t>(type) + 1u)) {
+ for (DataType::Type type : kTestTypes) {
s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
s = s.Union(SideEffects::ArrayWriteOfType(type));
s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
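The motivation for the explicit kTestTypes table, as far as it can be inferred from this patch: the removed loops stepped the DataType::Type enum from kReference up to kVoid, so any value declared in that range was silently included. With the new unsigned types (kUint8 and friends, visible in the scheduler hunks above), that range no longer coincides with the set of types SideEffects models. The table-driven form keeps the tested set explicit:

// Adding a new enum value no longer changes what these tests cover.
SideEffects s = SideEffects::None();
for (DataType::Type type : kTestTypes) {
  s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
  s = s.Union(SideEffects::ArrayWriteOfType(type));
}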
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 77b7a22..f4a8a17 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -233,7 +233,7 @@
}
void SsaBuilder::RunPrimitiveTypePropagation() {
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
if (block->IsLoopHeader()) {
@@ -293,7 +293,7 @@
DCHECK(DataType::IsIntOrLongType(type));
DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr);
- HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet(
+ HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetAllocator()) HArrayGet(
aget->GetArray(),
aget->GetIndex(),
type == DataType::Type::kInt32 ? DataType::Type::kFloat32 : DataType::Type::kFloat64,
@@ -319,7 +319,7 @@
// uses (because they are untyped) and environment uses (if --debuggable).
// After resolving all ambiguous ArrayGets, we will re-run primitive type
// propagation on the Phis which need to be updated.
- ArenaVector<HPhi*> worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder));
+ ArenaVector<HPhi*> worklist(graph_->GetAllocator()->Adapter(kArenaAllocGraphBuilder));
{
ScopedObjectAccess soa(Thread::Current());
@@ -393,7 +393,7 @@
}
// Refine the side effects of this floating point aset. Note that we do this even if
// no replacement occurs, since the right-hand-side may have been corrected already.
- aset->ComputeSideEffects();
+ aset->SetSideEffects(HArraySet::ComputeSideEffects(aset->GetComponentType()));
} else {
// Array elements are integral and the value initially assigned
// was integral too. Nothing to do.
@@ -566,7 +566,7 @@
HFloatConstant* result = constant->GetNext()->AsFloatConstant();
if (result == nullptr) {
float value = bit_cast<float, int32_t>(constant->GetValue());
- result = new (graph_->GetArena()) HFloatConstant(value);
+ result = new (graph_->GetAllocator()) HFloatConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheFloatConstant(result);
} else {
@@ -588,7 +588,7 @@
HDoubleConstant* result = constant->GetNext()->AsDoubleConstant();
if (result == nullptr) {
double value = bit_cast<double, int64_t>(constant->GetValue());
- result = new (graph_->GetArena()) HDoubleConstant(value);
+ result = new (graph_->GetAllocator()) HDoubleConstant(value);
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
graph_->CacheDoubleConstant(result);
} else {
@@ -621,7 +621,7 @@
if (next == nullptr
|| (next->AsPhi()->GetRegNumber() != phi->GetRegNumber())
|| (next->GetType() != type)) {
- ArenaAllocator* allocator = graph_->GetArena();
+ ArenaAllocator* allocator = graph_->GetAllocator();
HInputsRef inputs = phi->GetInputs();
HPhi* new_phi =
new (allocator) HPhi(allocator, phi->GetRegNumber(), inputs.size(), type);
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 1819ee5..509cdc1 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -56,9 +56,9 @@
dex_cache_(dex_cache),
handles_(handles),
agets_fixed_(false),
- ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)) {
+ ambiguous_agets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ ambiguous_asets_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_strings_(graph->GetAllocator()->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f1f1be2..9ab7a89 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -26,7 +26,7 @@
void SsaLivenessAnalysis::Analyze() {
// Compute the linear order directly in the graph's data structure
// (there are no more following graph mutations).
- LinearizeGraph(graph_, graph_->GetArena(), &graph_->linear_order_);
+ LinearizeGraph(graph_, &graph_->linear_order_);
// Liveness analysis.
NumberInstructions();
@@ -56,7 +56,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -74,7 +74,7 @@
instructions_from_ssa_index_.push_back(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(allocator_, current->GetType(), current));
}
instructions_from_lifetime_position_.push_back(current);
current->SetLifetimePosition(lifetime_position);
@@ -89,7 +89,7 @@
void SsaLivenessAnalysis::ComputeLiveness() {
for (HBasicBlock* block : graph_->GetLinearOrder()) {
block_infos_[block->GetBlockId()] =
- new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
+ new (allocator_) BlockInfo(allocator_, *block, number_of_ssa_values_);
}
// Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ec4ab31..9800af7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -20,6 +20,8 @@
#include <iostream>
#include "base/iteration_range.h"
+#include "base/scoped_arena_allocator.h"
+#include "base/scoped_arena_containers.h"
#include "nodes.h"
#include "utils/intrusive_forward_list.h"
@@ -32,7 +34,7 @@
class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
+ BlockInfo(ScopedArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
live_in_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
live_out_(allocator, number_of_ssa_values, false, kArenaAllocSsaLiveness),
@@ -82,7 +84,7 @@
stream << "[" << start_ << "," << end_ << ")";
}
- LiveRange* Dup(ArenaAllocator* allocator) const {
+ LiveRange* Dup(ScopedArenaAllocator* allocator) const {
return new (allocator) LiveRange(
start_, end_, next_ == nullptr ? nullptr : next_->Dup(allocator));
}
@@ -135,7 +137,7 @@
return user_->GetBlock()->GetLoopInformation();
}
- UsePosition* Clone(ArenaAllocator* allocator) const {
+ UsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) UsePosition(user_, input_index_, position_);
}
@@ -180,7 +182,7 @@
stream << position_;
}
- EnvUsePosition* Clone(ArenaAllocator* allocator) const {
+ EnvUsePosition* Clone(ScopedArenaAllocator* allocator) const {
return new (allocator) EnvUsePosition(environment_, input_index_, position_);
}
@@ -261,17 +263,19 @@
*/
class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
public:
- static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ static LiveInterval* MakeInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* instruction = nullptr) {
return new (allocator) LiveInterval(allocator, type, instruction);
}
- static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, DataType::Type type) {
+ static LiveInterval* MakeFixedInterval(ScopedArenaAllocator* allocator,
+ int reg,
+ DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false);
}
- static LiveInterval* MakeTempInterval(ArenaAllocator* allocator, DataType::Type type) {
+ static LiveInterval* MakeTempInterval(ScopedArenaAllocator* allocator, DataType::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, false, kNoRegister, true);
}
@@ -969,7 +973,7 @@
}
private:
- LiveInterval(ArenaAllocator* allocator,
+ LiveInterval(ScopedArenaAllocator* allocator,
DataType::Type type,
HInstruction* defined_by = nullptr,
bool is_fixed = false,
@@ -1082,7 +1086,7 @@
}
}
- ArenaAllocator* const allocator_;
+ ScopedArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
// for liveness (see `IsDeadAt`).
@@ -1158,14 +1162,15 @@
*/
class SsaLivenessAnalysis : public ValueObject {
public:
- SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
+ SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen, ScopedArenaAllocator* allocator)
: graph_(graph),
codegen_(codegen),
+ allocator_(allocator),
block_infos_(graph->GetBlocks().size(),
nullptr,
- graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_ssa_index_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
- instructions_from_lifetime_position_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
+ allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_ssa_index_(allocator_->Adapter(kArenaAllocSsaLiveness)),
+ instructions_from_lifetime_position_(allocator_->Adapter(kArenaAllocSsaLiveness)),
number_of_ssa_values_(0) {
}
@@ -1284,13 +1289,18 @@
HGraph* const graph_;
CodeGenerator* const codegen_;
- ArenaVector<BlockInfo*> block_infos_;
+
+ // Use a local ScopedArenaAllocator for allocating memory.
+ // This allocator must remain alive while doing register allocation.
+ ScopedArenaAllocator* allocator_;
+
+ ScopedArenaVector<BlockInfo*> block_infos_;
// Temporary array used when computing live_in, live_out, and kill sets.
- ArenaVector<HInstruction*> instructions_from_ssa_index_;
+ ScopedArenaVector<HInstruction*> instructions_from_ssa_index_;
// Temporary array used when inserting moves in the graph.
- ArenaVector<HInstruction*> instructions_from_lifetime_position_;
+ ScopedArenaVector<HInstruction*> instructions_from_lifetime_position_;
size_t number_of_ssa_values_;
ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
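The constructor change above makes the allocator's lifetime the caller's responsibility: block infos and live intervals are carved out of a ScopedArenaAllocator that must survive until register allocation is done. The intended call sequence, reconstructed from the test updates elsewhere in this patch (codegen construction elided):

ScopedArenaAllocator allocator(graph->GetArenaStack());
SsaLivenessAnalysis liveness(graph, &codegen, &allocator);
liveness.Analyze();
std::unique_ptr<RegisterAllocator> register_allocator =
    RegisterAllocator::Create(&allocator, &codegen, liveness, strategy);
register_allocator->AllocateRegisters();
// `allocator` may unwind only after AllocateRegisters() has completed.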
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index e89bf6d..9b78e0e 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -27,12 +27,10 @@
namespace art {
-class SsaLivenessAnalysisTest : public testing::Test {
+class SsaLivenessAnalysisTest : public OptimizingUnitTest {
public:
SsaLivenessAnalysisTest()
- : pool_(),
- allocator_(&pool_),
- graph_(CreateGraph(&allocator_)),
+ : graph_(CreateGraph()),
compiler_options_(),
instruction_set_(kRuntimeISA) {
std::string error_msg;
@@ -44,7 +42,7 @@
compiler_options_);
CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
// Create entry block.
- entry_ = new (&allocator_) HBasicBlock(graph_);
+ entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
graph_->SetEntryBlock(entry_);
}
@@ -52,14 +50,12 @@
protected:
HBasicBlock* CreateSuccessor(HBasicBlock* block) {
HGraph* graph = block->GetGraph();
- HBasicBlock* successor = new (&allocator_) HBasicBlock(graph);
+ HBasicBlock* successor = new (GetAllocator()) HBasicBlock(graph);
graph->AddBlock(successor);
block->AddSuccessor(successor);
return successor;
}
- ArenaPool pool_;
- ArenaAllocator allocator_;
HGraph* graph_;
CompilerOptions compiler_options_;
InstructionSet instruction_set_;
@@ -69,17 +65,17 @@
};
TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
- HInstruction* arg = new (&allocator_) HParameterValue(
+ HInstruction* arg = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry_->AddInstruction(arg);
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* ret = new (&allocator_) HReturn(arg);
+ HInstruction* ret = new (GetAllocator()) HReturn(arg);
block->AddInstruction(ret);
- block->AddInstruction(new (&allocator_) HExit());
+ block->AddInstruction(new (GetAllocator()) HExit());
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
std::ostringstream arg_dump;
@@ -89,49 +85,49 @@
}
TEST_F(SsaLivenessAnalysisTest, TestAput) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (&allocator_) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
block->AddInstruction(bounds_check);
- HEnvironment* bounds_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- bounds_check);
+ HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ bounds_check);
bounds_check_env->CopyFrom(args);
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
@@ -159,53 +155,53 @@
}
TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
- HInstruction* array = new (&allocator_) HParameterValue(
+ HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
- HInstruction* index = new (&allocator_) HParameterValue(
+ HInstruction* index = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(1), 1, DataType::Type::kInt32);
- HInstruction* value = new (&allocator_) HParameterValue(
+ HInstruction* value = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(2), 2, DataType::Type::kInt32);
- HInstruction* extra_arg1 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg1 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(3), 3, DataType::Type::kInt32);
- HInstruction* extra_arg2 = new (&allocator_) HParameterValue(
+ HInstruction* extra_arg2 = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(4), 4, DataType::Type::kReference);
ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 },
- allocator_.Adapter());
+ GetAllocator()->Adapter());
for (HInstruction* insn : args) {
entry_->AddInstruction(insn);
}
HBasicBlock* block = CreateSuccessor(entry_);
- HInstruction* null_check = new (&allocator_) HNullCheck(array, 0);
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
- HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- null_check);
+ HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ null_check);
null_check_env->CopyFrom(args);
null_check->SetRawEnvironment(null_check_env);
- HInstruction* length = new (&allocator_) HArrayLength(array, 0);
+ HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
// Use HAboveOrEqual+HDeoptimize as the bounds check.
- HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length);
+ HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
- HInstruction* deoptimize =
- new(&allocator_) HDeoptimize(&allocator_, ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ HInstruction* deoptimize = new (GetAllocator()) HDeoptimize(
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
block->AddInstruction(deoptimize);
- HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_,
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
- deoptimize);
+ HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
+ /* number_of_vregs */ 5,
+ /* method */ nullptr,
+ /* dex_pc */ 0u,
+ deoptimize);
deoptimize_env->CopyFrom(args);
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (&allocator_) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
- SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get());
+ SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get(), GetScopedAllocator());
ssa_analysis.Analyze();
EXPECT_FALSE(graph_->IsDebuggable());
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index b4f8408..3b95b86 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -31,7 +31,7 @@
// Phis are constructed live and should not be revived if previously marked
// dead. This algorithm temporarily breaks that invariant but we DCHECK that
// only phis which were initially live are revived.
- ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaSet<HPhi*> initially_live(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
// Add to the worklist phis referenced by non-phi instructions.
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
@@ -123,11 +123,11 @@
}
}
- ArenaBitVector visited_phis_in_cycle(graph_->GetArena(),
+ ArenaBitVector visited_phis_in_cycle(graph_->GetAllocator(),
graph_->GetCurrentInstructionId(),
/* expandable */ false,
kArenaAllocSsaPhiElimination);
- ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination));
+ ArenaVector<HPhi*> cycle_worklist(graph_->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination));
while (!worklist_.empty()) {
HPhi* phi = worklist_.back();
diff --git a/compiler/optimizing/ssa_phi_elimination.h b/compiler/optimizing/ssa_phi_elimination.h
index b48e820..e0cde07 100644
--- a/compiler/optimizing/ssa_phi_elimination.h
+++ b/compiler/optimizing/ssa_phi_elimination.h
@@ -31,7 +31,7 @@
public:
explicit SsaDeadPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaDeadPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
@@ -60,7 +60,7 @@
public:
explicit SsaRedundantPhiElimination(HGraph* graph)
: HOptimization(graph, kSsaRedundantPhiEliminationPassName),
- worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+ worklist_(graph->GetAllocator()->Adapter(kArenaAllocSsaPhiElimination)) {
worklist_.reserve(kDefaultWorklistSize);
}
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index ac998db..e08904e 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -29,7 +29,10 @@
namespace art {
-class SsaTest : public CommonCompilerTest {};
+class SsaTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data, const char* expected);
+};
class SsaPrettyPrinter : public HPrettyPrinter {
public:
@@ -77,10 +80,8 @@
}
}
-static void TestCode(const uint16_t* data, const char* expected) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+void SsaTest::TestCode(const uint16_t* data, const char* expected) {
+ HGraph* graph = CreateCFG(data);
// The suspend check implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 15cd4e8..88336b0 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -28,10 +28,13 @@
* Check that the HGraphBuilder adds suspend checks to backward branches.
*/
-static void TestCode(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
+class SuspendCheckTest : public OptimizingUnitTest {
+ protected:
+ void TestCode(const uint16_t* data);
+};
+
+void SuspendCheckTest::TestCode(const uint16_t* data) {
+ HGraph* graph = CreateCFG(data);
HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
HBasicBlock* loop_header = first_block->GetSingleSuccessor();
ASSERT_TRUE(loop_header->IsLoopHeader());
@@ -39,8 +42,6 @@
ASSERT_TRUE(loop_header->GetFirstInstruction()->IsSuspendCheck());
}
-class SuspendCheckTest : public CommonCompilerTest {};
-
TEST_F(SuspendCheckTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 70f290d..1e9a521 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -57,11 +57,11 @@
#endif
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
using vixl::aarch32::MemOperand;
using vixl::aarch32::pc;
using vixl::aarch32::r0;
- ArmVIXLAssembler assembler(arena);
+ ArmVIXLAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI.
@@ -98,8 +98,8 @@
#ifdef ART_ENABLE_CODEGEN_arm64
namespace arm64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Arm64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Arm64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
@@ -137,8 +137,8 @@
#ifdef ART_ENABLE_CODEGEN_mips
namespace mips {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset32 offset) {
- MipsAssembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
+ MipsAssembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -169,8 +169,8 @@
#ifdef ART_ENABLE_CODEGEN_mips64
namespace mips64 {
static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
- ArenaAllocator* arena, EntryPointCallingConvention abi, ThreadOffset64 offset) {
- Mips64Assembler assembler(arena);
+ ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
+ Mips64Assembler assembler(allocator);
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -200,9 +200,9 @@
#ifdef ART_ENABLE_CODEGEN_x86
namespace x86 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset32 offset) {
- X86Assembler assembler(arena);
+ X86Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in fs.
__ fs()->jmp(Address::Absolute(offset));
@@ -221,9 +221,9 @@
#ifdef ART_ENABLE_CODEGEN_x86_64
namespace x86_64 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* arena,
+static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
ThreadOffset64 offset) {
- x86_64::X86_64Assembler assembler(arena);
+ x86_64::X86_64Assembler assembler(allocator);
// All x86 trampolines call via the Thread* held in gs.
__ gs()->jmp(x86_64::Address::Absolute(offset, true));
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 9c11fd3..0e73e6b 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -151,8 +151,8 @@
private:
class ArmException;
public:
- explicit ArmVIXLAssembler(ArenaAllocator* arena)
- : Assembler(arena) {
+ explicit ArmVIXLAssembler(ArenaAllocator* allocator)
+ : Assembler(allocator) {
// Use Thumb2 instruction set.
vixl_masm_.UseT32();
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index ed57ca6..0bae4d4 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -117,7 +117,8 @@
}
void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
@@ -152,9 +153,33 @@
___ Pop(RegisterList(core_spill_mask));
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Refresh Mark Register.
- // TODO: Refresh MR only if suspend is taken.
- ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
+ if (may_suspend) {
+ // The method may be suspended; refresh the Marking Register.
+ ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
+ } else {
+ // The method shall not be suspended; no need to refresh the Marking Register.
+
+ // Check that the Marking Register is a callee-save register,
+ // and thus has been preserved by native code following the
+ // AAPCS calling convention.
+ DCHECK_NE(core_spill_mask & (1 << MR), 0)
+ << "core_spill_mask should contain Marking Register R" << MR;
+
+ // The following condition is a compile-time one, so it does not have a run-time cost.
+ if (kIsDebugBuild) {
+ // The following condition is a run-time one; it is executed after the
+ // previous compile-time test, to avoid penalizing non-debug builds.
+ if (emit_run_time_checks_in_debug_mode_) {
+ // Emit a run-time check verifying that the Marking Register is up-to-date.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // Ensure we are not clobbering a callee-save register that was restored before.
+ DCHECK_EQ(core_spill_mask & (1 << temp.GetCode()), 0)
+ << "core_spill_mask hould not contain scratch register R" << temp.GetCode();
+ asm_.GenerateMarkingRegisterCheck(temp);
+ }
+ }
+ }
}
// Return to LR.
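Context for this hunk and the arm64 one below: with Baker read barriers, the Marking Register (MR) caches the thread's is-GC-marking flag, and native code following the AAPCS must preserve it because it is callee-save. GenerateMarkingRegisterCheck emits code verifying that cache; the property it checks, expressed as ordinary C++ rather than emitted assembly (accessor and value encoding are assumptions for illustration):

    // Invariant sketch: MR must mirror the thread's marking flag whenever
    // managed code runs. mr_value stands in for the register contents.
    void CheckMarkingRegister(Thread* self, int32_t mr_value) {
      CHECK_EQ(mr_value, self->GetIsGcMarking() ? 1 : 0)
          << "Stale Marking Register after native code returned";
    }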
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index f3baf1f..e239004 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -35,9 +35,9 @@
private:
class ArmException;
public:
- explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ArmVIXLJNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
virtual ~ArmVIXLJNIMacroAssembler() {}
void FinalizeCode() OVERRIDE;
@@ -54,7 +54,8 @@
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 6b28363..e5ec24a 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -61,7 +61,7 @@
class Arm64Assembler FINAL : public Assembler {
public:
- explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
+ explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
virtual ~Arm64Assembler() {}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 9732b76..573bb6d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -743,7 +743,8 @@
}
void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) {
// Setup VIXL CPURegList for callee-saves.
CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
@@ -773,10 +774,36 @@
asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Refresh Mark Register.
- // TODO: Refresh MR only if suspend is taken.
- ___ Ldr(reg_w(MR),
- MemOperand(reg_x(TR), Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+ vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
+ vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
+
+ if (may_suspend) {
+ // The method may be suspended; refresh the Marking Register.
+ ___ Ldr(mr.W(), MemOperand(tr, Thread::IsGcMarkingOffset<kArm64PointerSize>().Int32Value()));
+ } else {
+ // The method shall not be suspended; no need to refresh the Marking Register.
+
+ // Check that the Marking Register is a callee-save register,
+ // and thus has been preserved by native code following the
+ // AAPCS64 calling convention.
+ DCHECK(core_reg_list.IncludesAliasOf(mr))
+ << "core_reg_list should contain Marking Register X" << mr.GetCode();
+
+ // The following condition is a compile-time one, so it does not have a run-time cost.
+ if (kIsDebugBuild) {
+ // The following condition is a run-time one; it is executed after the
+ // previous compile-time test, to avoid penalizing non-debug builds.
+ if (emit_run_time_checks_in_debug_mode_) {
+ // Emit a run-time check verifying that the Marking Register is up-to-date.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+ // Ensure we are not clobbering a callee-save register that was restored before.
+ DCHECK(!core_reg_list.IncludesAliasOf(temp.X()))
+ << "core_reg_list should not contain scratch register X" << temp.GetCode();
+ asm_.GenerateMarkingRegisterCheck(temp);
+ }
+ }
+ }
}
// Decrease frame size to start of callee saved regs.
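The nested tests above follow a deliberate two-level gate: the outer kIsDebugBuild condition is a compile-time constant, so release builds drop the whole block; the inner flag is consulted at run time to opt individual compilations into the extra verification. Generic form (the emission helper is a placeholder):

    if (kIsDebugBuild) {                          // constant-folded away in release builds
      if (emit_run_time_checks_in_debug_mode_) {  // per-compile opt-in, checked at run time
        EmitExpensiveVerificationCode();          // hypothetical emission helper
      }
    }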
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index baf0434..fda87aa 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -40,9 +40,9 @@
class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
public:
- explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd(allocator),
+ exception_blocks_(allocator->Adapter(kArenaAllocAssembler)) {}
~Arm64JNIMacroAssembler();
@@ -56,8 +56,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 25eca23..944c64b 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -25,10 +25,10 @@
namespace art {
-AssemblerBuffer::AssemblerBuffer(ArenaAllocator* arena)
- : arena_(arena) {
+AssemblerBuffer::AssemblerBuffer(ArenaAllocator* allocator)
+ : allocator_(allocator) {
static const size_t kInitialBufferCapacity = 4 * KB;
- contents_ = arena_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler);
+ contents_ = allocator_->AllocArray<uint8_t>(kInitialBufferCapacity, kArenaAllocAssembler);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
fixup_ = nullptr;
@@ -45,8 +45,8 @@
AssemblerBuffer::~AssemblerBuffer() {
- if (arena_->IsRunningOnMemoryTool()) {
- arena_->MakeInaccessible(contents_, Capacity());
+ if (allocator_->IsRunningOnMemoryTool()) {
+ allocator_->MakeInaccessible(contents_, Capacity());
}
}
@@ -81,7 +81,7 @@
// Allocate the new data area and copy contents of the old one to it.
contents_ = reinterpret_cast<uint8_t*>(
- arena_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
+ allocator_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
// Update the cursor and recompute the limit.
cursor_ = contents_ + old_size;
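For the rename above it helps to see the whole growth path: the buffer reallocates its storage inside the arena, re-bases cursor_ into the new block, and recomputes limit_ so the per-instruction space check stays cheap. A hedged sketch of the surrounding routine (the growth policy is an assumption; only the Realloc call mirrors the hunk):

    void AssemblerBuffer::ExtendCapacity(size_t min_capacity) {
      size_t old_size = cursor_ - contents_;
      size_t old_capacity = Capacity();
      size_t new_capacity = std::max(old_capacity * 2, min_capacity + kMinimumGap);
      // Allocate the new data area and copy contents of the old one to it.
      contents_ = reinterpret_cast<uint8_t*>(
          allocator_->Realloc(contents_, old_capacity, new_capacity, kArenaAllocAssembler));
      // Update the cursor and recompute the limit.
      cursor_ = contents_ + old_size;
      limit_ = ComputeLimit(contents_, new_capacity);
    }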
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 314ff8c..dbd35ab 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -89,11 +89,11 @@
class AssemblerBuffer {
public:
- explicit AssemblerBuffer(ArenaAllocator* arena);
+ explicit AssemblerBuffer(ArenaAllocator* allocator);
~AssemblerBuffer();
- ArenaAllocator* GetArena() {
- return arena_;
+ ArenaAllocator* GetAllocator() {
+ return allocator_;
}
// Basic support for emitting, loading, and storing.
@@ -252,7 +252,7 @@
// for a single, fast space check per instruction.
static const int kMinimumGap = 32;
- ArenaAllocator* arena_;
+ ArenaAllocator* allocator_;
uint8_t* contents_;
uint8_t* cursor_;
uint8_t* limit_;
@@ -392,8 +392,8 @@
*/
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
- ArenaAllocator* GetArena() {
- return buffer_.GetArena();
+ ArenaAllocator* GetAllocator() {
+ return buffer_.GetAllocator();
}
AssemblerBuffer* GetBuffer() {
@@ -401,7 +401,7 @@
}
protected:
- explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
+ explicit Assembler(ArenaAllocator* allocator) : buffer_(allocator), cfi_(this) {}
AssemblerBuffer buffer_;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 227954e..11a9b91 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -741,8 +741,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
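The `new (allocator) Ass(allocator)` idiom works because assemblers derive from (Deletable)ArenaObject, which supplies a placement operator new that carves the object out of the arena. Simplified sketch of that operator (the real class carries more machinery):

    // Simplified: how an arena-allocated class makes `new (allocator) T(...)` legal.
    template <ArenaAllocKind kAllocKind>
    class ArenaObjectSketch {
     public:
      void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size, kAllocKind);  // no matching heap delete needed
      }
    };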
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 4dbe71b..5622f89 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -285,7 +285,7 @@
__ DecreaseFrameSize(4096);
__ DecreaseFrameSize(32);
- __ RemoveFrame(frame_size, callee_save_regs);
+ __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
EmitAndCheck(&assembler, "VixlJniHelpers");
}
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 3ac6c3c..0616b35 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -47,7 +47,7 @@
template <>
MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips
@@ -58,19 +58,19 @@
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2:
- return MacroAsm32UniquePtr(new (arena) arm::ArmVIXLJNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler(
- arena,
+ return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMipsInstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86:
- return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena));
+ return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
#endif
default:
LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set;
@@ -82,7 +82,7 @@
template <>
MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features) {
#ifndef ART_ENABLE_CODEGEN_mips64
@@ -92,22 +92,22 @@
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
- return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(
- arena,
+ return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
+ allocator,
instruction_set_features != nullptr
? instruction_set_features->AsMips64InstructionSetFeatures()
: nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
+ return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
#endif
default:
- UNUSED(arena);
+ UNUSED(allocator);
LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
UNREACHABLE();
}
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index a8ca111..0fc1353 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -46,7 +46,7 @@
class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create(
- ArenaAllocator* arena,
+ ArenaAllocator* allocator,
InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features = nullptr);
@@ -66,7 +66,13 @@
const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+ //
+ // Argument `may_suspend` must be `true` if the compiled method may be
+ // suspended during its execution; it may be `false` only if suspension
+ // is impossible while the method executes.
+ virtual void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) = 0;
virtual void IncreaseFrameSize(size_t adjust) = 0;
virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -269,7 +275,7 @@
}
protected:
- explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
+ explicit JNIMacroAssemblerFwd(ArenaAllocator* allocator) : asm_(allocator) {}
T asm_;
};
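The new parameter shifts a decision to the caller: only a method that can never reach a suspend point may pass false and skip the Marking Register refresh. A caller-side sketch (the is_critical_native flag is an assumption here; @CriticalNative methods cannot suspend):

    // Hedged sketch of how a JNI compiler caller could choose the flag:
    const bool may_suspend = !is_critical_native;  // is_critical_native: assumed local
    __ RemoveFrame(frame_size, callee_save_regs, may_suspend);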
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index 6129680..ba95e21 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -80,8 +80,8 @@
}
// Override this to set up any architecture-specific things, e.g., CPU revision.
- virtual Ass* CreateAssembler(ArenaAllocator* arena) {
- return new (arena) Ass(arena);
+ virtual Ass* CreateAssembler(ArenaAllocator* allocator) {
+ return new (allocator) Ass(allocator);
}
// Override this to set up any architecture-specific things, e.g., register vectors.
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index b300cc5..b83e3f5 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -5016,7 +5016,8 @@
}
void MipsAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(!overwriting_);
cfi_.RememberState();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 0b4eb9c..57b3edd 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -192,16 +192,16 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k32>;
- explicit MipsAssembler(ArenaAllocator* arena,
+ explicit MipsAssembler(ArenaAllocator* allocator,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
reordering_(true),
ds_fsm_state_(kExpectingLabel),
ds_fsm_target_pc_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
@@ -1090,8 +1090,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index a3662db..9a69ffd 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -72,8 +72,8 @@
return " -D -bbinary -mmips:isa32r5";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index b6cb30a..b12b6b6 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -85,8 +85,8 @@
return " -D -bbinary -mmips:isa32r6";
}
- mips::MipsAssembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips::MipsAssembler(arena, instruction_set_features_.get());
+ mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 183b5e5..606d4c3 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -3406,7 +3406,8 @@
}
void Mips64Assembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
DCHECK(!overwriting_);
cfi_.RememberState();
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index bb54382..a3787ac 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -418,14 +418,14 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
- explicit Mips64Assembler(ArenaAllocator* arena,
+ explicit Mips64Assembler(ArenaAllocator* allocator,
const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
- : Assembler(arena),
+ : Assembler(allocator),
overwriting_(false),
overwrite_location_(0),
- literals_(arena->Adapter(kArenaAllocAssembler)),
- long_literals_(arena->Adapter(kArenaAllocAssembler)),
- jump_tables_(arena->Adapter(kArenaAllocAssembler)),
+ literals_(allocator->Adapter(kArenaAllocAssembler)),
+ long_literals_(allocator->Adapter(kArenaAllocAssembler)),
+ jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
@@ -1278,7 +1278,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16a36f9..bf0326d 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -83,8 +83,8 @@
return " -D -bbinary -mmips:isa64r6";
}
- mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
- return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get());
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) OVERRIDE {
+ return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
}
void SetUpHelpers() OVERRIDE {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index dce3ad2..f3b516c 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -266,7 +266,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -307,7 +308,8 @@
class X86Assembler FINAL : public Assembler {
public:
- explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86Assembler() {}
/*
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index e074346..7e29c4a 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -85,7 +85,8 @@
}
void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> spill_regs) {
+ ArrayRef<const ManagedRegister> spill_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
// -kFramePointerSize for ArtMethod*.
@@ -517,7 +518,7 @@
}
void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust);
+ X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust);
__ GetBuffer()->EnqueueSlowPath(slow);
__ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
__ j(kNotEqual, slow->Entry());
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 8ffda64..56eaf19 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -34,7 +34,7 @@
class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
public:
- explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ explicit X86JNIMacroAssembler(ArenaAllocator* allocator) : JNIMacroAssemblerFwd(allocator) {}
virtual ~X86JNIMacroAssembler() {}
//
@@ -48,8 +48,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 1130444..0d24a75 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -290,7 +290,8 @@
*/
class ConstantArea {
public:
- explicit ConstantArea(ArenaAllocator* arena) : buffer_(arena->Adapter(kArenaAllocAssembler)) {}
+ explicit ConstantArea(ArenaAllocator* allocator)
+ : buffer_(allocator->Adapter(kArenaAllocAssembler)) {}
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
@@ -352,7 +353,8 @@
class X86_64Assembler FINAL : public Assembler {
public:
- explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
+ explicit X86_64Assembler(ArenaAllocator* allocator)
+ : Assembler(allocator), constant_area_(allocator) {}
virtual ~X86_64Assembler() {}
/*
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index aff8871..b08ba4a 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -2043,7 +2043,7 @@
ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
size_t frame_size = 10 * kStackAlignment;
- assembler->RemoveFrame(frame_size, spill_regs);
+ assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
// Construct assembly text counterpart.
std::ostringstream str;
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index ec86254..5766f9d 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -100,7 +100,8 @@
}
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> spill_regs) {
+ ArrayRef<const ManagedRegister> spill_regs,
+ bool may_suspend ATTRIBUTE_UNUSED) {
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi().RememberState();
int gpr_count = 0;
@@ -583,9 +584,10 @@
};
void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+ X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
__ GetBuffer()->EnqueueSlowPath(slow);
- __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
+ __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+ Immediate(0));
__ j(kNotEqual, slow->Entry());
}
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index aa058f7..d1a3032 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -34,8 +34,8 @@
class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
PointerSize::k64> {
public:
- explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
- : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* allocator)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(allocator) {}
virtual ~X86_64JNIMacroAssembler() {}
//
@@ -49,8 +49,9 @@
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
+ void RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ bool may_suspend) OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
diff --git a/dalvikvm/Android.bp b/dalvikvm/Android.bp
index 0405fe1..cca9ac4 100644
--- a/dalvikvm/Android.bp
+++ b/dalvikvm/Android.bp
@@ -36,7 +36,7 @@
],
ldflags: ["-Wl,--export-dynamic"],
},
- linux: {
+ linux_glibc: {
ldflags: ["-Wl,--export-dynamic"],
},
},
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index c9125df..a93b0e7 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -91,7 +91,10 @@
name: "dex2oat-defaults",
host_supported: true,
defaults: ["art_defaults"],
- srcs: ["dex2oat.cc"],
+ srcs: [
+ "dex2oat_options.cc",
+ "dex2oat.cc",
+ ],
target: {
android: {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 7b46531..528cf3a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -20,11 +20,13 @@
#include <sys/stat.h>
#include "base/memory_tool.h"
+#include <forward_list>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
+#include <type_traits>
#include <unordered_set>
#include <vector>
@@ -50,16 +52,19 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "class_loader_context.h"
+#include "cmdline_parser.h"
#include "compiler.h"
#include "compiler_callbacks.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/quick_compiler_callbacks.h"
#include "dex/verification_results.h"
+#include "dex2oat_options.h"
#include "dex2oat_return_codes.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "driver/compiler_options_map-inl.h"
#include "elf_file.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -235,6 +240,13 @@
UsageError(" --oat-fd=<number>: specifies the oat output destination via a file descriptor.");
UsageError(" Example: --oat-fd=6");
UsageError("");
+ UsageError(" --input-vdex-fd=<number>: specifies the vdex input source via a file descriptor.");
+ UsageError(" Example: --input-vdex-fd=6");
+ UsageError("");
+ UsageError(" --output-vdex-fd=<number>: specifies the vdex output destination via a file");
+ UsageError(" descriptor.");
+ UsageError(" Example: --output-vdex-fd=6");
+ UsageError("");
UsageError(" --oat-location=<oat-name>: specifies a symbolic name for the file corresponding");
UsageError(" to the file descriptor specified by --oat-fd.");
UsageError(" Example: --oat-location=/data/dalvik-cache/system@app@Calculator.apk.oat");
@@ -659,76 +671,27 @@
std::string error_msg;
};
- void ParseZipFd(const StringPiece& option) {
- ParseUintOption(option, "--zip-fd", &zip_fd_, Usage);
- }
-
- void ParseInputVdexFd(const StringPiece& option) {
- // Note that the input vdex fd might be -1.
- ParseIntOption(option, "--input-vdex-fd", &input_vdex_fd_, Usage);
- }
-
- void ParseOutputVdexFd(const StringPiece& option) {
- ParseUintOption(option, "--output-vdex-fd", &output_vdex_fd_, Usage);
- }
-
- void ParseOatFd(const StringPiece& option) {
- ParseUintOption(option, "--oat-fd", &oat_fd_, Usage);
- }
-
- void ParseFdForCollection(const StringPiece& option,
- const char* arg_name,
- std::vector<uint32_t>* fds) {
- uint32_t fd;
- ParseUintOption(option, arg_name, &fd, Usage);
- fds->push_back(fd);
- }
-
- void ParseJ(const StringPiece& option) {
- ParseUintOption(option, "-j", &thread_count_, Usage, /* is_long_option */ false);
- }
-
- void ParseBase(const StringPiece& option) {
- DCHECK(option.starts_with("--base="));
- const char* image_base_str = option.substr(strlen("--base=")).data();
+ void ParseBase(const std::string& option) {
char* end;
- image_base_ = strtoul(image_base_str, &end, 16);
- if (end == image_base_str || *end != '\0') {
+ image_base_ = strtoul(option.c_str(), &end, 16);
+ if (end == option.c_str() || *end != '\0') {
Usage("Failed to parse hexadecimal value for option %s", option.data());
}
}
- void ParseInstructionSet(const StringPiece& option) {
- DCHECK(option.starts_with("--instruction-set="));
- StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
- // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
- std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
- strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
- buf.get()[instruction_set_str.length()] = 0;
- instruction_set_ = GetInstructionSetFromString(buf.get());
- // arm actually means thumb2.
- if (instruction_set_ == InstructionSet::kArm) {
- instruction_set_ = InstructionSet::kThumb2;
- }
- }
-
bool VerifyProfileData() {
return profile_compilation_info_->VerifyProfileData(dex_files_);
}
- void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--instruction-set-variant="));
- StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ void ParseInstructionSetVariant(const std::string& option, ParserOptions* parser_options) {
instruction_set_features_ = InstructionSetFeatures::FromVariant(
- instruction_set_, str.as_string(), &parser_options->error_msg);
+ instruction_set_, option, &parser_options->error_msg);
if (instruction_set_features_.get() == nullptr) {
Usage("%s", parser_options->error_msg.c_str());
}
}
- void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--instruction-set-features="));
- StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ void ParseInstructionSetFeatures(const std::string& option, ParserOptions* parser_options) {
if (instruction_set_features_ == nullptr) {
instruction_set_features_ = InstructionSetFeatures::FromVariant(
instruction_set_, "default", &parser_options->error_msg);
@@ -738,38 +701,9 @@
}
}
instruction_set_features_ =
- instruction_set_features_->AddFeaturesFromString(str.as_string(),
- &parser_options->error_msg);
+ instruction_set_features_->AddFeaturesFromString(option, &parser_options->error_msg);
if (instruction_set_features_ == nullptr) {
- Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
- }
- }
-
- void ParseCompilerBackend(const StringPiece& option, ParserOptions* parser_options) {
- DCHECK(option.starts_with("--compiler-backend="));
- parser_options->requested_specific_compiler = true;
- StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
- if (backend_str == "Quick") {
- compiler_kind_ = Compiler::kQuick;
- } else if (backend_str == "Optimizing") {
- compiler_kind_ = Compiler::kOptimizing;
- } else {
- Usage("Unknown compiler backend: %s", backend_str.data());
- }
- }
-
- void ParseImageFormat(const StringPiece& option) {
- const StringPiece substr("--image-format=");
- DCHECK(option.starts_with(substr));
- const StringPiece format_str = option.substr(substr.length());
- if (format_str == "lz4") {
- image_storage_mode_ = ImageHeader::kStorageModeLZ4;
- } else if (format_str == "lz4hc") {
- image_storage_mode_ = ImageHeader::kStorageModeLZ4HC;
- } else if (format_str == "uncompressed") {
- image_storage_mode_ = ImageHeader::kStorageModeUncompressed;
- } else {
- Usage("Unknown image format: %s", format_str.data());
+ Usage("Error parsing '%s': %s", option.c_str(), parser_options->error_msg.c_str());
}
}
@@ -1092,23 +1026,20 @@
base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
}
- const size_t num_expanded_files = 2 + (base_symbol_oat.empty() ? 0 : 1);
- char_backing_storage_.reserve((dex_locations_.size() - 1) * num_expanded_files);
-
// Now create the other names. Use a counted loop to skip the first one.
for (size_t i = 1; i < dex_locations_.size(); ++i) {
// TODO: Make everything properly std::string.
std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art");
- char_backing_storage_.push_back(base_img + image_name);
- image_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_img + image_name);
+ image_filenames_.push_back(char_backing_storage_.front().c_str());
std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
- char_backing_storage_.push_back(base_oat + oat_name);
- oat_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_oat + oat_name);
+ oat_filenames_.push_back(char_backing_storage_.front().c_str());
if (!base_symbol_oat.empty()) {
- char_backing_storage_.push_back(base_symbol_oat + oat_name);
- oat_unstripped_.push_back((char_backing_storage_.end() - 1)->c_str());
+ char_backing_storage_.push_front(base_symbol_oat + oat_name);
+ oat_unstripped_.push_back(char_backing_storage_.front().c_str());
}
}
}
@@ -1173,6 +1104,43 @@
kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
}
+ // This simple forward is here so the string specializations below don't look out of place.
+ template <typename T, typename U>
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<T>& key,
+ U* out) {
+ map.AssignIfExists(key, out);
+ }
+
+ // Specializations to handle const char* vs std::string.
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<std::string>& key,
+ const char** out) {
+ if (map.Exists(key)) {
+ char_backing_storage_.push_front(std::move(*map.Get(key)));
+ *out = char_backing_storage_.front().c_str();
+ }
+ }
+ void AssignIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<std::vector<std::string>>& key,
+ std::vector<const char*>* out) {
+ if (map.Exists(key)) {
+ for (auto& val : *map.Get(key)) {
+ char_backing_storage_.push_front(std::move(val));
+ out->push_back(char_backing_storage_.front().c_str());
+ }
+ }
+ }
+
+ template <typename T>
+ void AssignTrueIfExists(Dex2oatArgumentMap& map,
+ const Dex2oatArgumentMap::Key<T>& key,
+ bool* out) {
+ if (map.Exists(key)) {
+ *out = true;
+ }
+ }
+
// Parse the arguments from the command line. In case of an unrecognized option or impossible
// values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
// returns, arguments have been successfully parsed.
@@ -1182,159 +1150,104 @@
InitLogging(argv, Runtime::Abort);
- // Skip over argv[0].
- argv++;
- argc--;
-
- if (argc == 0) {
- Usage("No arguments specified");
- }
-
- std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
compiler_options_.reset(new CompilerOptions());
- for (int i = 0; i < argc; i++) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ using M = Dex2oatArgumentMap;
+ std::string error_msg;
+ std::unique_ptr<M> args_uptr = M::Parse(argc, const_cast<const char**>(argv), &error_msg);
+ if (args_uptr == nullptr) {
+ Usage("Failed to parse command line: %s", error_msg.c_str());
+ UNREACHABLE();
+ }
+
+ M& args = *args_uptr;
+
+ std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
+
+ AssignIfExists(args, M::DexFiles, &dex_filenames_);
+ AssignIfExists(args, M::DexLocations, &dex_locations_);
+ AssignIfExists(args, M::OatFiles, &oat_filenames_);
+ AssignIfExists(args, M::OatSymbols, &parser_options->oat_symbols);
+ AssignIfExists(args, M::ImageFilenames, &image_filenames_);
+ AssignIfExists(args, M::ZipFd, &zip_fd_);
+ AssignIfExists(args, M::ZipLocation, &zip_location_);
+ AssignIfExists(args, M::InputVdexFd, &input_vdex_fd_);
+ AssignIfExists(args, M::OutputVdexFd, &output_vdex_fd_);
+ AssignIfExists(args, M::InputVdex, &input_vdex_);
+ AssignIfExists(args, M::OutputVdex, &output_vdex_);
+ AssignIfExists(args, M::OatFd, &oat_fd_);
+ AssignIfExists(args, M::OatLocation, &oat_location_);
+ AssignIfExists(args, M::Watchdog, &parser_options->watch_dog_enabled);
+ AssignIfExists(args, M::WatchdogTimeout, &parser_options->watch_dog_timeout_in_ms);
+ AssignIfExists(args, M::Threads, &thread_count_);
+ AssignIfExists(args, M::ImageClasses, &image_classes_filename_);
+ AssignIfExists(args, M::ImageClassesZip, &image_classes_zip_filename_);
+ AssignIfExists(args, M::CompiledClasses, &compiled_classes_filename_);
+ AssignIfExists(args, M::CompiledClassesZip, &compiled_classes_zip_filename_);
+ AssignIfExists(args, M::CompiledMethods, &compiled_methods_filename_);
+ AssignIfExists(args, M::CompiledMethodsZip, &compiled_methods_zip_filename_);
+ AssignIfExists(args, M::Passes, &passes_to_run_filename_);
+ AssignIfExists(args, M::BootImage, &parser_options->boot_image_filename);
+ AssignIfExists(args, M::AndroidRoot, &android_root_);
+ AssignIfExists(args, M::Profile, &profile_file_);
+ AssignIfExists(args, M::ProfileFd, &profile_file_fd_);
+ AssignIfExists(args, M::RuntimeOptions, &runtime_args_);
+ AssignIfExists(args, M::SwapFile, &swap_file_name_);
+ AssignIfExists(args, M::SwapFileFd, &swap_fd_);
+ AssignIfExists(args, M::SwapDexSizeThreshold, &min_dex_file_cumulative_size_for_swap_);
+ AssignIfExists(args, M::SwapDexCountThreshold, &min_dex_files_for_swap_);
+ AssignIfExists(args, M::VeryLargeAppThreshold, &very_large_threshold_);
+ AssignIfExists(args, M::AppImageFile, &app_image_file_name_);
+ AssignIfExists(args, M::AppImageFileFd, &app_image_fd_);
+ AssignIfExists(args, M::NoInlineFrom, &no_inline_from_string_);
+ AssignIfExists(args, M::ClasspathDir, &classpath_dir_);
+ AssignIfExists(args, M::DirtyImageObjects, &dirty_image_objects_filename_);
+ AssignIfExists(args, M::ImageFormat, &image_storage_mode_);
+
+ AssignIfExists(args, M::Backend, &compiler_kind_);
+ parser_options->requested_specific_compiler = args.Exists(M::Backend);
+
+ AssignIfExists(args, M::TargetInstructionSet, &instruction_set_);
+ // arm actually means thumb2.
+ if (instruction_set_ == InstructionSet::kArm) {
+ instruction_set_ = InstructionSet::kThumb2;
+ }
+
+ AssignTrueIfExists(args, M::Host, &is_host_);
+ AssignTrueIfExists(args, M::DumpTiming, &dump_timing_);
+ AssignTrueIfExists(args, M::DumpPasses, &dump_passes_);
+ AssignTrueIfExists(args, M::DumpStats, &dump_stats_);
+ AssignTrueIfExists(args, M::AvoidStoringInvocation, &avoid_storing_invocation_);
+ AssignTrueIfExists(args, M::MultiImage, &multi_image_);
+
+ if (args.Exists(M::ForceDeterminism)) {
+ if (!SupportsDeterministicCompilation()) {
+ Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector");
}
- if (option.starts_with("--dex-file=")) {
- dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data());
- } else if (option.starts_with("--dex-location=")) {
- dex_locations_.push_back(option.substr(strlen("--dex-location=")).data());
- } else if (option.starts_with("--zip-fd=")) {
- ParseZipFd(option);
- } else if (option.starts_with("--zip-location=")) {
- zip_location_ = option.substr(strlen("--zip-location=")).data();
- } else if (option.starts_with("--input-vdex-fd=")) {
- ParseInputVdexFd(option);
- } else if (option.starts_with("--input-vdex=")) {
- input_vdex_ = option.substr(strlen("--input-vdex=")).data();
- } else if (option.starts_with("--output-vdex=")) {
- output_vdex_ = option.substr(strlen("--output-vdex=")).data();
- } else if (option.starts_with("--output-vdex-fd=")) {
- ParseOutputVdexFd(option);
- } else if (option.starts_with("--oat-file=")) {
- oat_filenames_.push_back(option.substr(strlen("--oat-file=")).data());
- } else if (option.starts_with("--oat-symbols=")) {
- parser_options->oat_symbols.push_back(option.substr(strlen("--oat-symbols=")).data());
- } else if (option.starts_with("--oat-fd=")) {
- ParseOatFd(option);
- } else if (option.starts_with("--oat-location=")) {
- oat_location_ = option.substr(strlen("--oat-location=")).data();
- } else if (option == "--watch-dog") {
- parser_options->watch_dog_enabled = true;
- } else if (option == "--no-watch-dog") {
- parser_options->watch_dog_enabled = false;
- } else if (option.starts_with("--watchdog-timeout=")) {
- ParseIntOption(option,
- "--watchdog-timeout",
- &parser_options->watch_dog_timeout_in_ms,
- Usage);
- } else if (option.starts_with("-j")) {
- ParseJ(option);
- } else if (option.starts_with("--image=")) {
- image_filenames_.push_back(option.substr(strlen("--image=")).data());
- } else if (option.starts_with("--image-classes=")) {
- image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
- } else if (option.starts_with("--image-classes-zip=")) {
- image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data();
- } else if (option.starts_with("--image-format=")) {
- ParseImageFormat(option);
- } else if (option.starts_with("--compiled-classes=")) {
- compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
- } else if (option.starts_with("--compiled-classes-zip=")) {
- compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
- } else if (option.starts_with("--compiled-methods=")) {
- compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
- } else if (option.starts_with("--compiled-methods-zip=")) {
- compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
- } else if (option.starts_with("--run-passes=")) {
- passes_to_run_filename_ = option.substr(strlen("--run-passes=")).data();
- } else if (option.starts_with("--base=")) {
- ParseBase(option);
- } else if (option.starts_with("--boot-image=")) {
- parser_options->boot_image_filename = option.substr(strlen("--boot-image=")).data();
- } else if (option.starts_with("--android-root=")) {
- android_root_ = option.substr(strlen("--android-root=")).data();
- } else if (option.starts_with("--instruction-set=")) {
- ParseInstructionSet(option);
- } else if (option.starts_with("--instruction-set-variant=")) {
- ParseInstructionSetVariant(option, parser_options.get());
- } else if (option.starts_with("--instruction-set-features=")) {
- ParseInstructionSetFeatures(option, parser_options.get());
- } else if (option.starts_with("--compiler-backend=")) {
- ParseCompilerBackend(option, parser_options.get());
- } else if (option.starts_with("--profile-file=")) {
- profile_file_ = option.substr(strlen("--profile-file=")).ToString();
- } else if (option.starts_with("--profile-file-fd=")) {
- ParseUintOption(option, "--profile-file-fd", &profile_file_fd_, Usage);
- } else if (option == "--host") {
- is_host_ = true;
- } else if (option == "--runtime-arg") {
- if (++i >= argc) {
- Usage("Missing required argument for --runtime-arg");
- }
- if (log_options) {
- LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
- }
- runtime_args_.push_back(argv[i]);
- } else if (option == "--dump-timing") {
- dump_timing_ = true;
- } else if (option == "--dump-passes") {
- dump_passes_ = true;
- } else if (option == "--dump-stats") {
- dump_stats_ = true;
- } else if (option == "--avoid-storing-invocation") {
- avoid_storing_invocation_ = true;
- } else if (option.starts_with("--swap-file=")) {
- swap_file_name_ = option.substr(strlen("--swap-file=")).data();
- } else if (option.starts_with("--swap-fd=")) {
- ParseUintOption(option, "--swap-fd", &swap_fd_, Usage);
- } else if (option.starts_with("--swap-dex-size-threshold=")) {
- ParseUintOption(option,
- "--swap-dex-size-threshold",
- &min_dex_file_cumulative_size_for_swap_,
- Usage);
- } else if (option.starts_with("--swap-dex-count-threshold=")) {
- ParseUintOption(option,
- "--swap-dex-count-threshold",
- &min_dex_files_for_swap_,
- Usage);
- } else if (option.starts_with("--very-large-app-threshold=")) {
- ParseUintOption(option,
- "--very-large-app-threshold",
- &very_large_threshold_,
- Usage);
- } else if (option.starts_with("--app-image-file=")) {
- app_image_file_name_ = option.substr(strlen("--app-image-file=")).data();
- } else if (option.starts_with("--app-image-fd=")) {
- ParseUintOption(option, "--app-image-fd", &app_image_fd_, Usage);
- } else if (option == "--multi-image") {
- multi_image_ = true;
- } else if (option.starts_with("--no-inline-from=")) {
- no_inline_from_string_ = option.substr(strlen("--no-inline-from=")).data();
- } else if (option == "--force-determinism") {
- if (!SupportsDeterministicCompilation()) {
- Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector");
- }
- force_determinism_ = true;
- } else if (option.starts_with("--classpath-dir=")) {
- classpath_dir_ = option.substr(strlen("--classpath-dir=")).data();
- } else if (option.starts_with("--class-loader-context=")) {
- class_loader_context_ = ClassLoaderContext::Create(
- option.substr(strlen("--class-loader-context=")).data());
- if (class_loader_context_ == nullptr) {
- Usage("Option --class-loader-context has an incorrect format: %s", option.data());
- }
- } else if (option.starts_with("--dirty-image-objects=")) {
- dirty_image_objects_filename_ = option.substr(strlen("--dirty-image-objects=")).data();
- } else if (!compiler_options_->ParseCompilerOption(option, Usage)) {
- Usage("Unknown argument %s", option.data());
+ force_determinism_ = true;
+ }
+
+ if (args.Exists(M::Base)) {
+ ParseBase(*args.Get(M::Base));
+ }
+ if (args.Exists(M::TargetInstructionSetVariant)) {
+ ParseInstructionSetVariant(*args.Get(M::TargetInstructionSetVariant), parser_options.get());
+ }
+ if (args.Exists(M::TargetInstructionSetFeatures)) {
+ ParseInstructionSetFeatures(*args.Get(M::TargetInstructionSetFeatures), parser_options.get());
+ }
+ if (args.Exists(M::ClassLoaderContext)) {
+ class_loader_context_ = ClassLoaderContext::Create(*args.Get(M::ClassLoaderContext));
+ if (class_loader_context_ == nullptr) {
+ Usage("Option --class-loader-context has an incorrect format: %s",
+ args.Get(M::ClassLoaderContext)->c_str());
}
}
+ if (!ReadCompilerOptions(args, compiler_options_.get(), &error_msg)) {
+ Usage(error_msg.c_str());
+ }
+
ProcessOptions(parser_options.get());
// Insert some compiler things.
@@ -2931,7 +2844,7 @@
std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_;
// Backing storage.
- std::vector<std::string> char_backing_storage_;
+ std::forward_list<std::string> char_backing_storage_;
// See CompilerOptions.force_determinism_.
bool force_determinism_;
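The std::vector -> std::forward_list switch above is load-bearing: image_filenames_, oat_filenames_ and oat_unstripped_ store raw const char* pointers into char_backing_storage_, and a vector that reallocates (or moves its elements) would leave them dangling, while forward_list nodes never relocate. Self-contained sketch of the difference:

    // Why forward_list: push_front never moves existing nodes, so c_str()
    // pointers handed out earlier stay valid. (Standalone illustration.)
    #include <forward_list>
    #include <string>
    #include <vector>

    int main() {
      std::forward_list<std::string> storage;
      std::vector<const char*> views;
      for (const char* name : {"boot.art", "boot.oat", "boot.vdex"}) {
        storage.push_front(name);
        views.push_back(storage.front().c_str());  // survives later push_front calls
      }
      // With std::vector<std::string> as storage, a later insertion could
      // reallocate and invalidate every pointer already stored in `views`.
      return views.size() == 3u ? 0 : 1;
    }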
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 7ddf1c1..f20e934 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -311,7 +311,11 @@
{
ProfileCompilationInfo profile;
VisitLibcoreDexes([&profile](MethodReference ref) {
- EXPECT_TRUE(profile.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot, ref));
+ uint32_t flags = ProfileCompilationInfo::MethodHotness::kFlagHot |
+ ProfileCompilationInfo::MethodHotness::kFlagStartup;
+ EXPECT_TRUE(profile.AddMethodIndex(
+ static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
+ ref));
}, [&profile](TypeReference ref) {
EXPECT_TRUE(profile.AddClassForDex(ref));
}, kMethodFrequency, kTypeFrequency);
@@ -324,8 +328,8 @@
profile_file.Close();
std::cout << "Profile sizes " << profile_sizes << std::endl;
// Since there is some difference between profile vs image + methods due to layout, check that
- // the range is within expected margins (+-5%).
- const double kRatio = 0.95;
+ // the range is within expected margins (+-10%).
+ const double kRatio = 0.90;
EXPECT_LE(profile_sizes.art_size * kRatio, compiled_methods_sizes.art_size);
// TODO(mathieuc): Find a reliable way to check compiled code. b/63746626
// EXPECT_LE(profile_sizes.oat_size * kRatio, compiled_methods_sizes.oat_size);
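Side note on the flags change in this file: MethodHotness::Flag is an enum, so ORing two enumerators yields a plain integer that must be cast back before AddMethodIndex will accept it, which is exactly what the hunk above does. Minimal illustration (enumerator values assumed):

    enum Flag : uint32_t { kFlagHot = 0x1, kFlagStartup = 0x2 };  // illustrative values
    uint32_t bits = kFlagHot | kFlagStartup;  // OR promotes to the underlying type
    Flag combined = static_cast<Flag>(bits);  // cast back for the enum-typed API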
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
new file mode 100644
index 0000000..43e6c4d
--- /dev/null
+++ b/dex2oat/dex2oat_options.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex2oat_options.h"
+
+#include <memory>
+
+#include "cmdline_parser.h"
+#include "driver/compiler_options_map-inl.h"
+
+namespace art {
+
+template<>
+struct CmdlineType<InstructionSet> : CmdlineTypeParser<InstructionSet> {
+ Result Parse(const std::string& option) {
+ InstructionSet set = GetInstructionSetFromString(option.c_str());
+ if (set == kNone) {
+ return Result::Failure(std::string("Not a valid instruction set: '") + option + "'");
+ }
+ return Result::Success(set);
+ }
+
+ static const char* Name() { return "InstructionSet"; }
+};
+
+#define COMPILER_OPTIONS_MAP_TYPE Dex2oatArgumentMap
+#define COMPILER_OPTIONS_MAP_KEY_TYPE Dex2oatArgumentMapKey
+#include "driver/compiler_options_map-storage.h"
+
+// Specify storage for the Dex2oatOptions keys.
+
+#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) \
+ const Dex2oatArgumentMap::Key<Type> Dex2oatArgumentMap::Name {__VA_ARGS__}; // NOLINT [readability/braces] [4]
+#include "dex2oat_options.def"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+
+using M = Dex2oatArgumentMap;
+using Parser = CmdlineParser<Dex2oatArgumentMap, Dex2oatArgumentMap::Key>;
+using Builder = Parser::Builder;
+
+static void AddInputMappings(Builder& builder) {
+ builder.
+ Define("--dex-file=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::DexFiles)
+ .Define("--dex-location=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::DexLocations)
+ .Define("--zip-fd=_")
+ .WithType<int>()
+ .IntoKey(M::ZipFd)
+ .Define("--zip-location=_")
+ .WithType<std::string>()
+ .IntoKey(M::ZipLocation)
+ .Define("--boot-image=_")
+ .WithType<std::string>()
+ .IntoKey(M::BootImage);
+}
+
+static void AddGeneratedArtifactMappings(Builder& builder) {
+ builder.
+ Define("--input-vdex-fd=_")
+ .WithType<int>()
+ .IntoKey(M::InputVdexFd)
+ .Define("--input-vdex=_")
+ .WithType<std::string>()
+ .IntoKey(M::InputVdex)
+ .Define("--output-vdex-fd=_")
+ .WithType<int>()
+ .IntoKey(M::OutputVdexFd)
+ .Define("--output-vdex=_")
+ .WithType<std::string>()
+ .IntoKey(M::OutputVdex)
+ .Define("--oat-file=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::OatFiles)
+ .Define("--oat-symbols=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::OatSymbols)
+ .Define("--oat-fd=_")
+ .WithType<int>()
+ .IntoKey(M::OatFd)
+ .Define("--oat-location=_")
+ .WithType<std::string>()
+ .IntoKey(M::OatLocation);
+}
+
+static void AddImageMappings(Builder& builder) {
+ builder.
+ Define("--image=_")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::ImageFilenames)
+ .Define("--image-classes=_")
+ .WithType<std::string>()
+ .IntoKey(M::ImageClasses)
+ .Define("--image-classes-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::ImageClassesZip)
+ .Define("--base=_")
+ .WithType<std::string>()
+ .IntoKey(M::Base)
+ .Define("--app-image-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::AppImageFile)
+ .Define("--app-image-fd=_")
+ .WithType<int>()
+ .IntoKey(M::AppImageFileFd)
+ .Define("--multi-image")
+ .IntoKey(M::MultiImage)
+ .Define("--dirty-image-objects=_")
+ .WithType<std::string>()
+ .IntoKey(M::DirtyImageObjects)
+ .Define("--image-format=_")
+ .WithType<ImageHeader::StorageMode>()
+ .WithValueMap({{"lz4", ImageHeader::kStorageModeLZ4},
+ {"lz4hc", ImageHeader::kStorageModeLZ4HC},
+ {"uncompressed", ImageHeader::kStorageModeUncompressed}})
+ .IntoKey(M::ImageFormat);
+}
+
+static void AddSwapMappings(Builder& builder) {
+ builder.
+ Define("--swap-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::SwapFile)
+ .Define("--swap-fd=_")
+ .WithType<int>()
+ .IntoKey(M::SwapFileFd)
+ .Define("--swap-dex-size-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::SwapDexSizeThreshold)
+ .Define("--swap-dex-count-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::SwapDexCountThreshold);
+}
+
+static void AddCompilerMappings(Builder& builder) {
+ builder.
+ Define("--compiled-classes=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledClasses)
+ .Define("--compiled-classes-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledClassesZip)
+ .Define("--compiled-methods=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledMethods)
+ .Define("--compiled-methods-zip=_")
+ .WithType<std::string>()
+ .IntoKey(M::CompiledMethodsZip)
+ .Define("--run-passes=_")
+ .WithType<std::string>()
+ .IntoKey(M::Passes)
+ .Define("--profile-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::Profile)
+ .Define("--profile-file-fd=_")
+ .WithType<int>()
+ .IntoKey(M::ProfileFd)
+ .Define("--no-inline-from=_")
+ .WithType<std::string>()
+ .IntoKey(M::NoInlineFrom);
+}
+
+static void AddTargetMappings(Builder& builder) {
+ builder.
+ Define("--instruction-set=_")
+ .WithType<InstructionSet>()
+ .IntoKey(M::TargetInstructionSet)
+ .Define("--instruction-set-variant=_")
+ .WithType<std::string>()
+ .IntoKey(M::TargetInstructionSetVariant)
+ .Define("--instruction-set-features=_")
+ .WithType<std::string>()
+ .IntoKey(M::TargetInstructionSetFeatures);
+}
+
+static Parser CreateArgumentParser() {
+ std::unique_ptr<Builder> parser_builder = std::unique_ptr<Builder>(new Builder());
+
+ AddInputMappings(*parser_builder);
+ AddGeneratedArtifactMappings(*parser_builder);
+ AddImageMappings(*parser_builder);
+ AddSwapMappings(*parser_builder);
+ AddCompilerMappings(*parser_builder);
+ AddTargetMappings(*parser_builder);
+
+ parser_builder->
+ Define({"--watch-dog", "--no-watch-dog"})
+ .WithValues({true, false})
+ .IntoKey(M::Watchdog)
+ .Define("--watchdog-timeout=_")
+ .WithType<int>()
+ .IntoKey(M::WatchdogTimeout)
+ .Define("-j_")
+ .WithType<unsigned int>()
+ .IntoKey(M::Threads)
+ .Define("--android-root=_")
+ .WithType<std::string>()
+ .IntoKey(M::AndroidRoot)
+ .Define("--compiler-backend=_")
+ .WithType<Compiler::Kind>()
+ .WithValueMap({{"Quick", Compiler::Kind::kQuick},
+ {"Optimizing", Compiler::Kind::kOptimizing}})
+ .IntoKey(M::Backend)
+ .Define("--host")
+ .IntoKey(M::Host)
+ .Define("--dump-timing")
+ .IntoKey(M::DumpTiming)
+ .Define("--dump-passes")
+ .IntoKey(M::DumpPasses)
+ .Define("--dump-stats")
+ .IntoKey(M::DumpStats)
+ .Define("--avoid-storing-invocation")
+ .IntoKey(M::AvoidStoringInvocation)
+ .Define("--very-large-app-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::VeryLargeAppThreshold)
+ .Define("--force-determinism")
+ .IntoKey(M::ForceDeterminism)
+ .Define("--classpath-dir=_")
+ .WithType<std::string>()
+ .IntoKey(M::ClasspathDir)
+ .Define("--class-loader-context=_")
+ .WithType<std::string>()
+ .IntoKey(M::ClassLoaderContext)
+ .Define("--runtime-arg _")
+ .WithType<std::vector<std::string>>().AppendValues()
+ .IntoKey(M::RuntimeOptions);
+
+ AddCompilerOptionsArgumentParserOptions<Dex2oatArgumentMap>(*parser_builder);
+
+ parser_builder->IgnoreUnrecognized(false);
+
+ return parser_builder->Build();
+}
+
+#pragma GCC diagnostic pop
+
+std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
+ const char** argv,
+ std::string* error_msg) {
+ Parser parser = CreateArgumentParser();
+ CmdlineResult parse_result = parser.Parse(argv, argc);
+ if (!parse_result.IsSuccess()) {
+ *error_msg = parse_result.GetMessage();
+ return nullptr;
+ }
+
+ return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap()));
+}
+
+} // namespace art
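
A minimal sketch of how the new entry point might be driven from a main()-style caller, assuming the header above is included; the error handling and the printed output are illustrative:

    #include <cstdio>
    #include <memory>
    #include <string>

    #include "dex2oat_options.h"

    int main(int argc, char** argv) {
      std::string error_msg;
      std::unique_ptr<art::Dex2oatArgumentMap> args =
          art::Dex2oatArgumentMap::Parse(argc, const_cast<const char**>(argv), &error_msg);
      if (args == nullptr) {
        fprintf(stderr, "Failed to parse arguments: %s\n", error_msg.c_str());
        return 1;
      }
      // Typed lookup: Get() returns a pointer that is null when --zip-fd was never passed.
      if (const int* zip_fd = args->Get(art::Dex2oatArgumentMap::ZipFd)) {
        fprintf(stderr, "zip fd: %d\n", *zip_fd);
      }
      return 0;
    }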
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
new file mode 100644
index 0000000..83a3035
--- /dev/null
+++ b/dex2oat/dex2oat_options.def
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DEX2OAT_OPTIONS_KEY
+#error "Please #define DEX2OAT_OPTIONS_KEY before #including this file"
+#define DEX2OAT_OPTIONS_KEY(...) // Don't display errors in this file in IDEs.
+#endif
+
+// This file defines the list of keys for Dex2oatOptions.
+// These can be used with Dex2oatOptions.Get/Set/etc, for example:
+// Dex2oatOptions opt; bool* dex2oat_enabled = opt.Get(Dex2oatOptions::Dex2Oat);
+//
+// Column Descriptions:
+// <<Type>> <<Key Name>> <<Default Value>>
+//
+// Default values are only used by Map::GetOrDefault(K<T>).
+// If a default value is omitted here, T{} is used as the default value, which is
+// almost-always the value of the type as if it was memset to all 0.
+//
+// Please keep the columns aligned if possible when adding new rows.
+//
+
+// Parse-able keys from the command line.
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexFiles)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, DexLocations)
+DEX2OAT_OPTIONS_KEY (int, ZipFd)
+DEX2OAT_OPTIONS_KEY (std::string, ZipLocation)
+DEX2OAT_OPTIONS_KEY (int, InputVdexFd)
+DEX2OAT_OPTIONS_KEY (std::string, InputVdex)
+DEX2OAT_OPTIONS_KEY (int, OutputVdexFd)
+DEX2OAT_OPTIONS_KEY (std::string, OutputVdex)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatFiles)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatSymbols)
+DEX2OAT_OPTIONS_KEY (int, OatFd)
+DEX2OAT_OPTIONS_KEY (std::string, OatLocation)
+DEX2OAT_OPTIONS_KEY (bool, Watchdog)
+DEX2OAT_OPTIONS_KEY (int, WatchdogTimeout)
+DEX2OAT_OPTIONS_KEY (unsigned int, Threads)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, ImageFilenames)
+DEX2OAT_OPTIONS_KEY (std::string, ImageClasses)
+DEX2OAT_OPTIONS_KEY (std::string, ImageClassesZip)
+DEX2OAT_OPTIONS_KEY (ImageHeader::StorageMode, ImageFormat)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledClasses)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledClassesZip)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledMethods)
+DEX2OAT_OPTIONS_KEY (std::string, CompiledMethodsZip)
+DEX2OAT_OPTIONS_KEY (std::string, Passes)
+DEX2OAT_OPTIONS_KEY (std::string, Base) // TODO: Hex string parsing.
+DEX2OAT_OPTIONS_KEY (std::string, BootImage)
+DEX2OAT_OPTIONS_KEY (std::string, AndroidRoot)
+DEX2OAT_OPTIONS_KEY (InstructionSet, TargetInstructionSet)
+DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetVariant)
+DEX2OAT_OPTIONS_KEY (std::string, TargetInstructionSetFeatures)
+DEX2OAT_OPTIONS_KEY (Compiler::Kind, Backend)
+DEX2OAT_OPTIONS_KEY (std::string, Profile)
+DEX2OAT_OPTIONS_KEY (int, ProfileFd)
+DEX2OAT_OPTIONS_KEY (Unit, Host)
+DEX2OAT_OPTIONS_KEY (Unit, DumpTiming)
+DEX2OAT_OPTIONS_KEY (Unit, DumpPasses)
+DEX2OAT_OPTIONS_KEY (Unit, DumpStats)
+DEX2OAT_OPTIONS_KEY (Unit, AvoidStoringInvocation)
+DEX2OAT_OPTIONS_KEY (std::string, SwapFile)
+DEX2OAT_OPTIONS_KEY (int, SwapFileFd)
+DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexSizeThreshold)
+DEX2OAT_OPTIONS_KEY (unsigned int, SwapDexCountThreshold)
+DEX2OAT_OPTIONS_KEY (unsigned int, VeryLargeAppThreshold)
+DEX2OAT_OPTIONS_KEY (std::string, AppImageFile)
+DEX2OAT_OPTIONS_KEY (int, AppImageFileFd)
+DEX2OAT_OPTIONS_KEY (Unit, MultiImage)
+DEX2OAT_OPTIONS_KEY (std::string, NoInlineFrom)
+DEX2OAT_OPTIONS_KEY (Unit, ForceDeterminism)
+DEX2OAT_OPTIONS_KEY (std::string, ClasspathDir)
+DEX2OAT_OPTIONS_KEY (std::string, ClassLoaderContext)
+DEX2OAT_OPTIONS_KEY (std::string, DirtyImageObjects)
+DEX2OAT_OPTIONS_KEY (std::vector<std::string>, RuntimeOptions)
+
+#undef DEX2OAT_OPTIONS_KEY
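
The .def file is an X-macro list: every includer defines DEX2OAT_OPTIONS_KEY to whatever expansion it needs before including the file, and the file #undefs the macro at its end. A stripped-down sketch of the pattern, with invented names:

    // my_options.def -- the single source of truth, included repeatedly:
    //   MY_OPTIONS_KEY(int, ZipFd)
    //   MY_OPTIONS_KEY(std::string, OatLocation)

    // In the header, expand each row into a key *declaration*:
    #define MY_OPTIONS_KEY(Type, Name) static const Key<Type> Name;
    #include "my_options.def"

    // In exactly one .cc file, expand the same rows into *definitions* (storage):
    #define MY_OPTIONS_KEY(Type, Name) const MyArgumentMap::Key<Type> MyArgumentMap::Name;
    #include "my_options.def"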
diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h
new file mode 100644
index 0000000..a4c7186
--- /dev/null
+++ b/dex2oat/dex2oat_options.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_DEX2OAT_DEX2OAT_OPTIONS_H_
+#define ART_DEX2OAT_DEX2OAT_OPTIONS_H_
+
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "base/variant_map.h"
+#include "cmdline_types.h" // TODO: don't need to include this file here
+#include "compiler.h"
+#include "driver/compiler_options_map.h"
+#include "image.h"
+
+namespace art {
+
+template <typename TVariantMap,
+ template <typename TKeyValue> class TVariantMapKey>
+struct CmdlineParser;
+
+// Define a key that is usable with a Dex2oatArgumentMap.
+// This key will *not* work with other subtypes of VariantMap.
+template <typename TValue>
+struct Dex2oatArgumentMapKey : VariantMapKey<TValue> {
+ Dex2oatArgumentMapKey() {}
+ explicit Dex2oatArgumentMapKey(TValue default_value)
+ : VariantMapKey<TValue>(std::move(default_value)) {}
+ // Don't ODR-use constexpr default values, which means that Struct::Fields
+ // that are declared 'static constexpr T Name = Value' don't need to have a matching definition.
+};
+
+// Defines a type-safe heterogeneous key->value map.
+// Use the VariantMap interface to look up or to store a Dex2oatArgumentMapKey,Value pair.
+//
+// Example:
+// auto map = Dex2oatArgumentMap();
+// map.Set(Dex2oatArgumentMap::ZipFd, -1);
+// int* zip_fd = map.Get(Dex2oatArgumentMap::ZipFd);
+//
+struct Dex2oatArgumentMap : CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey> {
+ // This 'using' line is necessary to inherit the variadic constructor.
+ using CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>::CompilerOptionsMap;
+
+ static std::unique_ptr<Dex2oatArgumentMap> Parse(int argc,
+ const char** argv,
+ std::string* error_msg);
+
+ // Make the next many usages of Key slightly shorter to type.
+ template <typename TValue>
+ using Key = Dex2oatArgumentMapKey<TValue>;
+
+ // List of key declarations, shorthand for 'static const Key<T> Name'
+#define DEX2OAT_OPTIONS_KEY(Type, Name, ...) static const Key<Type> (Name);
+#include "dex2oat_options.def"
+};
+
+extern template struct CompilerOptionsMap<Dex2oatArgumentMap, Dex2oatArgumentMapKey>;
+
+} // namespace art
+
+#endif // ART_DEX2OAT_DEX2OAT_OPTIONS_H_
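
A short usage sketch of the typed map the header defines; the values are arbitrary:

    #include <string>

    #include "dex2oat_options.h"

    void Example() {
      art::Dex2oatArgumentMap map;
      map.Set(art::Dex2oatArgumentMap::Threads, 4u);                        // Key<unsigned int>
      map.Set(art::Dex2oatArgumentMap::OatLocation, std::string("a.oat"));  // Key<std::string>

      // Each key carries its value type, so a lookup with the wrong type
      // simply fails to compile instead of failing at runtime.
      unsigned int* threads = map.Get(art::Dex2oatArgumentMap::Threads);  // points at 4u
      (void)threads;
    }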
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index a80dbf6..dfbe31a 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -961,8 +961,8 @@
if (method_hotness.IsHot() &&
!method_hotness.IsStartup() && !method_hotness.IsPostStartup()) {
std::string name = method_reference.PrettyMethod();
- LOG(WARNING) << "Method " << name << " had a Hot method that wasn't marked "
- << "either start-up or post-startup. Possible corrupted profile?";
+ LOG(FATAL) << "Method " << name << " had a Hot method that wasn't marked "
+ << "either start-up or post-startup. Possible corrupted profile?";
- // This is not fatal, so only warn.
}
}
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d89d9f0..50434ef 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -48,16 +48,6 @@
namespace art {
namespace linker {
-NO_RETURN static void Usage(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- std::string error;
- android::base::StringAppendV(&error, fmt, ap);
- LOG(FATAL) << error;
- va_end(ap);
- UNREACHABLE();
-}
-
class OatTest : public CommonCompilerTest {
protected:
static const bool kCompile = false; // DISABLED_ due to the time to compile libcore
@@ -101,8 +91,11 @@
insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
ASSERT_TRUE(insn_features_ != nullptr) << *error_msg;
compiler_options_.reset(new CompilerOptions);
- for (const std::string& option : compiler_options) {
- compiler_options_->ParseCompilerOption(option, Usage);
+ if (!compiler_options_->ParseCompilerOptions(compiler_options,
+ false /* ignore_unrecognized */,
+ error_msg)) {
+ LOG(FATAL) << *error_msg;
+ UNREACHABLE();
}
verification_results_.reset(new VerificationResults(compiler_options_.get()));
callbacks_.reset(new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp));
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 60ce363..705043b 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -22,7 +22,7 @@
"dexdump_main.cc",
"dexdump.cc",
],
- cflags: ["-Wall"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: [
"libart",
"libbase",
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 0db790b..7599d23 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -906,14 +906,27 @@
// Call site information is too large to detail in disassembly so just output the index.
outSize = snprintf(buf.get(), bufSize, "call_site@%0*x", width, index);
break;
- // SOME NOT SUPPORTED:
- // case Instruction::kIndexVaries:
- // case Instruction::kIndexInlineMethod:
- default:
- outSize = snprintf(buf.get(), bufSize, "<?>");
+ case Instruction::kIndexMethodHandleRef:
+ // Method handle information is too large to detail in disassembly so just output the index.
+ outSize = snprintf(buf.get(), bufSize, "method_handle@%0*x", width, index);
+ break;
+ case Instruction::kIndexProtoRef:
+ if (index < pDexFile->GetHeader().proto_ids_size_) {
+ const DexFile::ProtoId& protoId = pDexFile->GetProtoId(index);
+ const Signature signature = pDexFile->GetProtoSignature(protoId);
+ const std::string& proto = signature.ToString();
+ outSize = snprintf(buf.get(), bufSize, "%s // proto@%0*x", proto.c_str(), width, index);
+ } else {
+ outSize = snprintf(buf.get(), bufSize, "<?> // proto@%0*x", width, index);
+ }
break;
} // switch
+ if (outSize == 0) {
+ // The index type has not been handled in the switch above.
+ outSize = snprintf(buf.get(), bufSize, "<?>");
+ }
+
// Determine success of string construction.
if (outSize >= bufSize) {
// The buffer wasn't big enough; retry with computed size. Note: snprintf()
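
The new fall-through branch relies on the same snprintf retry idiom the surrounding code uses: format into a guessed buffer size, and if snprintf reports truncation, allocate the exact size it computed and format again. A self-contained sketch of that idiom, not dexdump's actual buffer management:

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <string>

    std::string FormatProtoIndex(uint32_t index) {
      size_t buf_size = 16;  // Initial guess.
      std::unique_ptr<char[]> buf(new char[buf_size]);
      int out = snprintf(buf.get(), buf_size, "proto@%04x", index);
      if (out >= 0 && static_cast<size_t>(out) >= buf_size) {
        // snprintf returned the length it *would* have written; retry with
        // that length plus one byte for the terminating NUL.
        buf_size = static_cast<size_t>(out) + 1;
        buf.reset(new char[buf_size]);
        snprintf(buf.get(), buf_size, "proto@%04x", index);
      }
      return std::string(buf.get());
    }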
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index 52b1ee9..03943bf 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -16,7 +16,7 @@
name: "dexlist",
host_supported: true,
srcs: ["dexlist.cc"],
- cflags: ["-Wall"],
+ cflags: ["-Wall", "-Werror"],
shared_libs: ["libart"],
}
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index d3b8ce1..f0c9158 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -1715,7 +1715,7 @@
*error_msg = StringPrintf("Failed to check process status: %s", strerror(errno));
}
return kParseError;
- } else if (instruction_set_ != kRuntimeISA) {
+ } else if (instruction_set_ != InstructionSet::kNone && instruction_set_ != kRuntimeISA) {
// Don't allow different ISAs since the images are ISA-specific.
// Right now the code assumes both the runtime ISA and the remote ISA are identical.
*error_msg = "Must use the default runtime ISA; changing ISA is not supported.";
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index aa07d24..906404b 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -83,10 +83,3 @@
ifdef TARGET_2ND_ARCH
dump-oat-boot: dump-oat-boot-$(TARGET_2ND_ARCH)
endif
-
-.PHONY: dump-oat-Calculator
-ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
-dump-oat-Calculator: $(TARGET_OUT_APPS)/Calculator.odex $(DEFAULT_DEX_PREOPT_BUILT_IMAGE) $(OATDUMP)
- $(OATDUMP) --oat-file=$< --output=$(ART_DUMP_OAT_PATH)/Calculator.oatdump.txt
- @echo Output in $(ART_DUMP_OAT_PATH)/Calculator.oatdump.txt
-endif
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index bac57f9..b30d45a 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -163,18 +163,16 @@
return ThreadUtil::ResumeThreadList(env, request_count, request_list, results);
}
- static jvmtiError StopThread(jvmtiEnv* env,
- jthread thread ATTRIBUTE_UNUSED,
- jobject exception ATTRIBUTE_UNUSED) {
+ static jvmtiError StopThread(jvmtiEnv* env, jthread thread, jobject exception) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::StopThread(env, thread, exception);
}
- static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::InterruptThread(env, thread);
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 10ddfc1..ad405e8 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -229,7 +229,7 @@
.can_get_monitor_info = 1,
.can_pop_frame = 0,
.can_redefine_classes = 1,
- .can_signal_thread = 0,
+ .can_signal_thread = 1,
.can_get_source_file_name = 1,
.can_get_line_numbers = 1,
.can_get_source_debug_extension = 1,
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 32dd69e..0282fbc 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -842,6 +842,12 @@
bool operator()(art::ObjPtr<art::mirror::Class> klass)
OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+ if (!klass->IsLoaded()) {
+ // Skip classes that aren't loaded since they might not have fully allocated and initialized
+ // their methods. Furthermore, since the jvmti-plugin must have been loaded by this point,
+ // these methods will definitely be using debuggable code.
+ return true;
+ }
for (auto& m : klass->GetMethods(art::kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
if (m.IsNative() || m.IsProxyMethod()) {
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index f05977a..50402a0 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -572,8 +572,9 @@
return;
}
art::ArtMethod* method = visitor.GetMethod();
- if (method->IsNative()) {
- // TODO We really should support get/set for non-shadow frames.
+ // Native and 'art' proxy methods don't have registers.
+ if (method->IsNative() || method->IsProxyMethod()) {
+ // TODO It might be useful to fake up support for get at least on proxy frames.
result_ = ERR(OPAQUE_FRAME);
return;
} else if (method->GetCodeItem()->registers_size_ <= slot_) {
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 1b4e910..98fad80 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -1096,15 +1096,19 @@
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
- false, /*allow_soft_failures*/
+ true, /*allow_soft_failures*/
/*log_level*/
art::verifier::HardFailLogMode::kLogWarning,
&error);
- bool passes = failure == art::verifier::FailureKind::kNoFailure;
- if (!passes) {
- RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+ switch (failure) {
+ case art::verifier::FailureKind::kNoFailure:
+ case art::verifier::FailureKind::kSoftFailure:
+ return true;
+ case art::verifier::FailureKind::kHardFailure: {
+ RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+ return false;
+ }
}
- return passes;
}
// Looks through the previously allocated cookies to see if we need to update them with another new
@@ -1396,10 +1400,12 @@
linker->SetEntryPointsToInterpreter(&method);
method.SetCodeItemOffset(dex_file_->FindCodeItemOffset(class_def, dex_method_idx));
// Clear all the intrinsics related flags.
- method.ClearAccessFlags(art::kAccIntrinsic | (~art::kAccFlagsNotUsedByIntrinsic));
+ method.SetNotIntrinsic();
// Notify the jit that this method is redefined.
art::jit::Jit* jit = driver_->runtime_->GetJit();
- if (jit != nullptr) {
+ // Non-invokable methods don't have any JIT data associated with them so we don't need to tell
+ // the jit about them.
+ if (jit != nullptr && method.IsInvokable()) {
jit->GetCodeCache()->NotifyMethodRedefined(&method);
}
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index d4cc42a..e0c1399 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -789,7 +789,7 @@
}
*method_ptr = art::jni::EncodeArtMethod(closure.method);
- if (closure.method->IsNative()) {
+ if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
*location_ptr = -1;
} else {
if (closure.dex_pc == art::dex::kDexNoIndex) {
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 907b515..9a809df 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -949,4 +949,65 @@
return OK;
}
+jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ jobject exception) {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self);
+ art::StackHandleScope<1> hs(self);
+ if (exception == nullptr) {
+ return ERR(INVALID_OBJECT);
+ }
+ art::ObjPtr<art::mirror::Object> obj(soa.Decode<art::mirror::Object>(exception));
+ if (!obj->GetClass()->IsThrowableClass()) {
+ return ERR(INVALID_OBJECT);
+ }
+ art::Handle<art::mirror::Throwable> exc(hs.NewHandle(obj->AsThrowable()));
+ art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ art::Thread* target = nullptr;
+ jvmtiError err = ERR(INTERNAL);
+ if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ return err;
+ } else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ struct StopThreadClosure : public art::Closure {
+ public:
+ explicit StopThreadClosure(art::Handle<art::mirror::Throwable> except) : exception_(except) { }
+
+ void Run(art::Thread* me) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Make sure the thread is prepared to notice the exception.
+ art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(me);
+ me->SetAsyncException(exception_.Get());
+ // Wake up the thread if it is sleeping.
+ me->Notify();
+ }
+
+ private:
+ art::Handle<art::mirror::Throwable> exception_;
+ };
+ StopThreadClosure c(exc);
+ if (target->RequestSynchronousCheckpoint(&c)) {
+ return OK;
+ } else {
+ // Something went wrong, probably the thread died.
+ return ERR(THREAD_NOT_ALIVE);
+ }
+}
+
+jvmtiError ThreadUtil::InterruptThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self);
+ art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ art::Thread* target = nullptr;
+ jvmtiError err = ERR(INTERNAL);
+ if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ return err;
+ } else if (target->GetState() == art::ThreadState::kStarting || target->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ target->Interrupt(self);
+ return OK;
+}
+
} // namespace openjdkjvmti
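
With can_signal_thread now available, an agent reaches these implementations through the standard JVMTI entry points. A sketch, assuming 'jvmti' is a live jvmtiEnv* whose capabilities were added at Agent_OnLoad and 'throwable' is a local reference to a java.lang.Throwable:

    #include <jvmti.h>

    void SignalTarget(jvmtiEnv* jvmti, jthread target, jobject throwable) {
      // Posts 'throwable' asynchronously; the checkpoint above delivers it.
      jvmtiError err = jvmti->StopThread(target, throwable);
      if (err == JVMTI_ERROR_THREAD_NOT_ALIVE) {
        return;  // Target exited before the checkpoint could run.
      }
      // Wakes the thread if it is blocked in sleep/wait, like Thread.interrupt().
      jvmti->InterruptThread(target);
    }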
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index ceebff6..09b4cab 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -93,6 +93,9 @@
const jthread* threads,
jvmtiError* results);
+ static jvmtiError StopThread(jvmtiEnv* env, jthread thr, jobject exception);
+ static jvmtiError InterruptThread(jvmtiEnv* env, jthread thr);
+
// Returns true if we decoded the thread and it is alive, false otherwise with an appropriate
// error placed into 'err'. A thread is alive if it has had its 'start' function called and has
// (or at least could have) executed managed code and has not yet returned past its first managed
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 711bc65..ea776e7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -530,6 +530,7 @@
"barrier_test.cc",
"base/arena_allocator_test.cc",
"base/bit_field_test.cc",
+ "base/bit_struct_test.cc",
"base/bit_utils_test.cc",
"base/bit_vector_test.cc",
"base/hash_set_test.cc",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7ff35ac..4181169 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -377,14 +377,14 @@
}
inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
- DCHECK(IsUint<8>(intrinsic));
// Currently we only do intrinsics for static/final methods or methods of final
// classes. We don't set kHasSingleImplementation for those methods.
DCHECK(IsStatic() || IsFinal() || GetDeclaringClass()->IsFinal()) <<
"Potential conflict with kAccSingleImplementation";
- uint32_t new_value = (GetAccessFlags() & kAccFlagsNotUsedByIntrinsic) |
- kAccIntrinsic |
- (intrinsic << POPCOUNT(kAccFlagsNotUsedByIntrinsic));
+ static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
+ DCHECK_LE(intrinsic, kAccIntrinsicBits >> kAccFlagsShift);
+ uint32_t intrinsic_bits = intrinsic << kAccFlagsShift;
+ uint32_t new_value = (GetAccessFlags() & ~kAccIntrinsicBits) | kAccIntrinsic | intrinsic_bits;
if (kIsDebugBuild) {
uint32_t java_flags = (GetAccessFlags() & kAccJavaFlagsMask);
bool is_constructor = IsConstructor();
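
The new encoding shifts the intrinsic ordinal into the contiguous kAccIntrinsicBits field using CTZ (count trailing zeros) of the mask, replacing the old POPCOUNT-based arithmetic. A standalone sketch of the pack/unpack round trip; the mask values here are invented, the real ones live in modifiers.h:

    #include <cstdint>

    constexpr uint32_t kAccIntrinsic = 0x80000000u;      // "is an intrinsic" marker
    constexpr uint32_t kAccIntrinsicBits = 0x7fff0000u;  // contiguous ordinal field

    constexpr int kShift = __builtin_ctz(kAccIntrinsicBits);  // CTZ() in ART

    constexpr uint32_t PackIntrinsic(uint32_t flags, uint32_t ordinal) {
      return (flags & ~kAccIntrinsicBits) | kAccIntrinsic | (ordinal << kShift);
    }

    constexpr uint32_t UnpackIntrinsic(uint32_t flags) {
      return (flags & kAccIntrinsicBits) >> kShift;
    }

    static_assert(UnpackIntrinsic(PackIntrinsic(0u, 42u)) == 42u, "ordinal round-trips");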
diff --git a/runtime/art_method.h b/runtime/art_method.h
index fbdc32d..caef81c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -117,26 +117,6 @@
access_flags_.store(new_access_flags, std::memory_order_relaxed);
}
- // This setter guarantees atomicity.
- void AddAccessFlags(uint32_t flag) {
- uint32_t old_access_flags;
- uint32_t new_access_flags;
- do {
- old_access_flags = access_flags_.load(std::memory_order_relaxed);
- new_access_flags = old_access_flags | flag;
- } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
- }
-
- // This setter guarantees atomicity.
- void ClearAccessFlags(uint32_t flag) {
- uint32_t old_access_flags;
- uint32_t new_access_flags;
- do {
- old_access_flags = access_flags_.load(std::memory_order_relaxed);
- new_access_flags = old_access_flags & ~flag;
- } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
- }
-
static MemberOffset AccessFlagsOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
}
@@ -196,12 +176,21 @@
ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetIntrinsic() {
+ static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
+ static_assert(IsPowerOfTwo((kAccIntrinsicBits >> kAccFlagsShift) + 1),
+ "kAccIntrinsicBits are not continuous");
+ static_assert((kAccIntrinsic & kAccIntrinsicBits) == 0,
+ "kAccIntrinsic overlaps kAccIntrinsicBits");
DCHECK(IsIntrinsic());
- return (GetAccessFlags() >> POPCOUNT(kAccFlagsNotUsedByIntrinsic)) & kAccMaxIntrinsic;
+ return (GetAccessFlags() & kAccIntrinsicBits) >> kAccFlagsShift;
+ }
+
+ void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
}
bool IsCopied() {
- static_assert((kAccCopied & kAccFlagsNotUsedByIntrinsic) == kAccCopied,
+ static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccCopied conflicts with intrinsic modifier");
const bool copied = (GetAccessFlags() & kAccCopied) != 0;
// (IsMiranda() || IsDefaultConflicting()) implies copied
@@ -211,7 +200,7 @@
}
bool IsMiranda() {
- static_assert((kAccMiranda & kAccFlagsNotUsedByIntrinsic) == kAccMiranda,
+ static_assert((kAccMiranda & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccMiranda conflicts with intrinsic modifier");
return (GetAccessFlags() & kAccMiranda) != 0;
}
@@ -245,7 +234,7 @@
// This is set by the class linker.
bool IsDefault() {
- static_assert((kAccDefault & kAccFlagsNotUsedByIntrinsic) == kAccDefault,
+ static_assert((kAccDefault & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccDefault conflicts with intrinsic modifier");
return (GetAccessFlags() & kAccDefault) != 0;
}
@@ -290,6 +279,22 @@
AddAccessFlags(kAccSkipAccessChecks);
}
+ bool PreviouslyWarm() {
+ if (IsIntrinsic()) {
+ // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
+ return true;
+ }
+ return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
+ }
+
+ void SetPreviouslyWarm() {
+ if (IsIntrinsic()) {
+ // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
+ return;
+ }
+ AddAccessFlags(kAccPreviouslyWarm);
+ }
+
// Should this method be run in the interpreter and count locks (e.g., failed structured-
// locking verification)?
bool MustCountLocks() {
@@ -299,6 +304,10 @@
return (GetAccessFlags() & kAccMustCountLocks) != 0;
}
+ void SetMustCountLocks() {
+ AddAccessFlags(kAccMustCountLocks);
+ }
+
// Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative
// -- Independent of kAccFastNative access flags.
bool IsAnnotatedWithFastNative();
@@ -782,6 +791,37 @@
template <ReadBarrierOption kReadBarrierOption> void GetAccessFlagsDCheck();
+ static inline bool IsValidIntrinsicUpdate(uint32_t modifier) {
+ return (((modifier & kAccIntrinsic) == kAccIntrinsic) &&
+ (((modifier & ~(kAccIntrinsic | kAccIntrinsicBits)) == 0)));
+ }
+
+ static inline bool OverlapsIntrinsicBits(uint32_t modifier) {
+ return (modifier & kAccIntrinsicBits) != 0;
+ }
+
+ // This setter guarantees atomicity.
+ void AddAccessFlags(uint32_t flag) {
+ DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+ uint32_t old_access_flags;
+ uint32_t new_access_flags;
+ do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ new_access_flags = old_access_flags | flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+ }
+
+ // This setter guarantees atomicity.
+ void ClearAccessFlags(uint32_t flag) {
+ DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+ uint32_t old_access_flags;
+ uint32_t new_access_flags;
+ do {
+ old_access_flags = access_flags_.load(std::memory_order_relaxed);
+ new_access_flags = old_access_flags & ~flag;
+ } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+ }
+
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
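
The relocated setters use the canonical compare-exchange read-modify-write loop. A generic sketch of the same pattern over std::atomic; note that compare_exchange_weak stores the current value back into the expected argument on failure, so each retry recomputes against the latest flags until the exchange wins:

    #include <atomic>
    #include <cstdint>

    void AtomicAddFlags(std::atomic<uint32_t>& flags, uint32_t flag) {
      uint32_t old_value = flags.load(std::memory_order_relaxed);
      uint32_t new_value;
      do {
        new_value = old_value | flag;
        // On failure, compare_exchange_weak refreshes 'old_value' with the
        // current contents of 'flags', and the loop tries again.
      } while (!flags.compare_exchange_weak(old_value, new_value));
    }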
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 8738adf..c48e30f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -149,7 +149,10 @@
os << "===== Allocation by kind\n";
static_assert(arraysize(kAllocNames) == kNumArenaAllocKinds, "arraysize of kAllocNames");
for (int i = 0; i < kNumArenaAllocKinds; i++) {
+ // Reduce output by listing only allocation kinds that actually have allocations.
+ if (alloc_stats_[i] != 0u) {
os << kAllocNames[i] << std::setw(10) << alloc_stats_[i] << "\n";
+ }
}
}
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index e2c2e2f..6bf56c8 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -23,9 +23,9 @@
class ArenaAllocatorTest : public testing::Test {
protected:
- size_t NumberOfArenas(ArenaAllocator* arena) {
+ size_t NumberOfArenas(ArenaAllocator* allocator) {
size_t result = 0u;
- for (Arena* a = arena->arena_head_; a != nullptr; a = a->next_) {
+ for (Arena* a = allocator->arena_head_; a != nullptr; a = a->next_) {
++result;
}
return result;
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 62b974e..2e71156 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -137,22 +137,22 @@
typedef ArenaAllocatorAdapter<U> other;
};
- explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+ explicit ArenaAllocatorAdapter(ArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
~ArenaAllocatorAdapter() = default;
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -174,14 +174,14 @@
typedef ArenaAllocatorAdapter<U> other;
};
- ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+ ArenaAllocatorAdapter(ArenaAllocator* allocator, ArenaAllocKind kind)
: ArenaAllocatorAdapterKind(kind),
- arena_allocator_(arena_allocator) {
+ allocator_(allocator) {
}
template <typename U>
ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit
: ArenaAllocatorAdapterKind(other),
- arena_allocator_(other.arena_allocator_) {
+ allocator_(other.allocator_) {
}
ArenaAllocatorAdapter(const ArenaAllocatorAdapter&) = default;
ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter&) = default;
@@ -197,10 +197,10 @@
pointer allocate(size_type n,
ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
- return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
+ return allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
void deallocate(pointer p, size_type n) {
- arena_allocator_->MakeInaccessible(p, sizeof(T) * n);
+ allocator_->MakeInaccessible(p, sizeof(T) * n);
}
template <typename U, typename... Args>
@@ -213,7 +213,7 @@
}
private:
- ArenaAllocator* arena_allocator_;
+ ArenaAllocator* allocator_;
template <typename U>
friend class ArenaAllocatorAdapter;
@@ -226,7 +226,7 @@
template <typename T>
inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
const ArenaAllocatorAdapter<T>& rhs) {
- return lhs.arena_allocator_ == rhs.arena_allocator_;
+ return lhs.allocator_ == rhs.allocator_;
}
template <typename T>
diff --git a/runtime/base/arena_object.h b/runtime/base/arena_object.h
index 2d8e7d8..ed00bab 100644
--- a/runtime/base/arena_object.h
+++ b/runtime/base/arena_object.h
@@ -32,8 +32,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -56,8 +56,8 @@
return allocator->Alloc(size, kAllocKind);
}
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kAllocKind);
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
diff --git a/runtime/base/bit_struct.h b/runtime/base/bit_struct.h
new file mode 100644
index 0000000..1f86ee1
--- /dev/null
+++ b/runtime/base/bit_struct.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_STRUCT_H_
+#define ART_RUNTIME_BASE_BIT_STRUCT_H_
+
+#include "bit_struct_detail.h"
+#include "bit_utils.h"
+
+//
+// Zero-cost, type-safe, well-defined "structs" of bit fields.
+//
+// ---------------------------------------------
+// Usage example:
+// ---------------------------------------------
+//
+// // Definition for type 'Example'
+// BITSTRUCT_DEFINE_START(Example, 10)
+// BitStructUint<0, 2> u2; // Every field must be a BitStruct[*].
+// BitStructInt<2, 7> i7;
+// BitStructUint<9, 1> i1;
+// BITSTRUCT_DEFINE_END(Example);
+//
+// Would define a bit struct with this layout:
+// <- 1 -> <-- 7 --> <- 2 ->
+// +--------+---------------+-----+
+// | i1 | i7 | u2 +
+// +--------+---------------+-----+
+// 10 9 2 0
+//
+// // Read-write just like regular values.
+// Example ex;
+// ex.u2 = 3;
+// ex.i7 = -25;
+// ex.i1 = true;
+// size_t u2 = ex.u2;
+// int i7 = ex.i7;
+// bool i1 = ex.i1;
+//
+// // It's packed down to the smallest # of machine words.
+// assert(sizeof(Example) == 2);
+// // The exact bit pattern is well-defined by the template parameters.
+// uint16_t cast = *reinterpret_cast<uint16_t*>(&ex);
+// assert(cast == ((3) | (0b1100111 << 2) | (true << 9)));
+//
+// ---------------------------------------------
+// Why not just use C++ bitfields?
+// ---------------------------------------------
+//
+// The layout is implementation-defined.
+// We do not know whether the fields are packed left-to-right or
+// right-to-left, which makes them useless when the memory layout needs to be
+// precisely controlled.
+//
+// ---------------------------------------------
+// More info:
+// ---------------------------------------------
+// Currently uintmax_t is the largest supported underlying storage type;
+// all (kBitOffset + kBitWidth) must fit into BitSizeOf<uintmax_t>().
+//
+// Using BitStruct[U]int will automatically select an underlying type
+// that's the smallest to fit your (offset + bitwidth).
+//
+// BitStructNumber can be used to manually select an underlying type.
+//
+// BitStructField can be used with custom standard-layout structs,
+// thus allowing for arbitrary nesting of bit structs.
+//
+namespace art {
+// Zero-cost wrapper around a struct 'T', allowing it to be stored as a bitfield
+// at offset 'kBitOffset' and width 'kBitWidth'.
+// The storage is plain unsigned int, whose size is the smallest required to fit
+// 'kBitOffset + kBitWidth'. All operations to this become BitFieldExtract/BitFieldInsert
+// operations to the underlying uint.
+//
+// Field memory representation:
+//
+// MSB <-- width --> LSB
+// +--------+------------+--------+
+// | ?????? | u bitfield | ?????? +
+// +--------+------------+--------+
+// offset 0
+//
+// Reading/writing the bitfield (un)packs it into a temporary T:
+//
+// MSB <-- width --> LSB
+// +-----------------+------------+
+// | 0.............0 | T bitfield |
+// +-----------------+------------+
+// 0
+//
+// It's the responsibility of the StorageType to ensure the bit representation
+// of T can be represented by kBitWidth.
+template <typename T,
+ size_t kBitOffset,
+ size_t kBitWidth = BitStructSizeOf<T>(),
+ typename StorageType = typename detail::MinimumTypeUnsignedHelper<kBitOffset + kBitWidth>::type>
+struct BitStructField {
+ static_assert(std::is_standard_layout<T>::value, "T must be standard layout");
+
+ operator T() const {
+ return Get();
+ }
+
+ // Exclude overload when T==StorageType.
+ template <typename _ = void,
+ typename = std::enable_if_t<std::is_same<T, StorageType>::value, _>>
+ explicit operator StorageType() const {
+ return GetStorage();
+ }
+
+ BitStructField& operator=(T value) {
+ return Assign(*this, value);
+ }
+
+ static constexpr size_t BitStructSizeOf() {
+ return kBitWidth;
+ }
+
+ protected:
+ template <typename T2>
+ T2& Assign(T2& what, T value) {
+ // Since C++ doesn't allow the type of operator= to change out
+ // in the subclass, reimplement operator= in each subclass
+ // manually and call this helper function.
+ static_assert(std::is_base_of<BitStructField, T2>::value, "T2 must inherit BitStructField");
+ what.Set(value);
+ return what;
+ }
+
+ T Get() const {
+ ValueStorage vs;
+ vs.pod_.val_ = GetStorage();
+ return vs.value_;
+ }
+
+ void Set(T value) {
+ ValueStorage value_as_storage;
+ value_as_storage.value_ = value;
+
+ storage_.pod_.val_ = BitFieldInsert(storage_.pod_.val_,
+ value_as_storage.pod_.val_,
+ kBitOffset,
+ kBitWidth);
+ }
+
+ private:
+ StorageType GetStorage() const {
+ return BitFieldExtract(storage_.pod_.val_, kBitOffset, kBitWidth);
+ }
+
+ // Underlying value must be wrapped in a separate standard-layout struct.
+ // See below for more details.
+ struct PodWrapper {
+ StorageType val_;
+ };
+
+ union ValueStorage {
+ // Safely alias pod_ and value_ together.
+ //
+ // See C++ 9.5.1 [class.union]:
+ // If a standard-layout union contains several standard-layout structs that share a common
+ // initial sequence ... it is permitted to inspect the common initial sequence of any of
+ // standard-layout struct members.
+ PodWrapper pod_;
+ T value_;
+ } storage_;
+
+ // Future work: In theory almost any non-standard-layout type could be supported here,
+ // assuming it doesn't rely on the address of (this).
+ // We just have to use memcpy since the union-aliasing would not work.
+};
+
+// Base class for number-like BitStruct fields.
+// T is the type to store in as a bit field.
+// kBitOffset, kBitWidth define the position and length of the bitfield.
+//
+// (Common usage should be BitStructInt, BitStructUint -- this
+// intermediate template allows a user-defined integer to be used.)
+template <typename T, size_t kBitOffset, size_t kBitWidth>
+struct BitStructNumber : public BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T> {
+ using StorageType = T;
+
+ BitStructNumber& operator=(T value) {
+ return BaseType::Assign(*this, value);
+ }
+
+ /*implicit*/ operator T() const {
+ return Get();
+ }
+
+ explicit operator bool() const {
+ return static_cast<bool>(Get());
+ }
+
+ BitStructNumber& operator++() {
+ *this = Get() + 1u;
+ return *this;
+ }
+
+ StorageType operator++(int) {
+ return Get() + 1u;
+ }
+
+ BitStructNumber& operator--() {
+ *this = Get() - 1u;
+ return *this;
+ }
+
+ StorageType operator--(int) {
+ return Get() - 1u;
+ }
+
+ private:
+ using BaseType = BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T>;
+ using BaseType::Get;
+};
+
+// Create a BitStruct field which uses the smallest underlying int storage type,
+// in order to be large enough to fit (kBitOffset + kBitWidth).
+//
+// Values are sign-extended when they are read out.
+template <size_t kBitOffset, size_t kBitWidth>
+using BitStructInt =
+ BitStructNumber<typename detail::MinimumTypeHelper<int, kBitOffset + kBitWidth>::type,
+ kBitOffset,
+ kBitWidth>;
+
+// Create a BitStruct field which uses the smallest underlying uint storage type,
+// in order to be large enough to fit (kBitOffset + kBitWidth).
+//
+// Values are zero-extended when they are read out.
+template <size_t kBitOffset, size_t kBitWidth>
+using BitStructUint =
+ BitStructNumber<typename detail::MinimumTypeHelper<unsigned int, kBitOffset + kBitWidth>::type,
+ kBitOffset,
+ kBitWidth>;
+
+// Start a definition for a bitstruct.
+// A bitstruct is defined to be a union with a common initial subsequence
+// that we call 'DefineBitStructSize<bitwidth>'.
+//
+// See top of file for usage example.
+//
+// This marker is required by the C++ standard in order to
+// have a "common initial sequence".
+//
+// See C++ 9.5.1 [class.union]:
+// If a standard-layout union contains several standard-layout structs that share a common
+// initial sequence ... it is permitted to inspect the common initial sequence of any of
+// standard-layout struct members.
+#define BITSTRUCT_DEFINE_START(name, bitwidth) \
+ union name { \
+ art::detail::DefineBitStructSize<(bitwidth)> _; \
+ static constexpr size_t BitStructSizeOf() { return (bitwidth); }
+
+// End the definition of a bitstruct, and insert a sanity check
+// to ensure that the bitstruct did not exceed the specified size.
+//
+// See top of file for usage example.
+#define BITSTRUCT_DEFINE_END(name) \
+ }; /* NOLINT [readability/braces] [4] */ \
+ static_assert(art::detail::ValidateBitStructSize<name>(), \
+ #name "bitsize incorrect: " \
+ "did you insert extra fields that weren't BitStructX, " \
+ "and does the size match the sum of the field widths?")
+
+// Determine the minimal bit size for a user-defined type T.
+// Used by BitStructField to determine how small a custom type is.
+template <typename T>
+static constexpr size_t BitStructSizeOf() {
+ return T::BitStructSizeOf();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_STRUCT_H_
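
A compiling sketch mirroring the usage comment at the top of the header; the struct name, field names, and widths are invented:

    #include <cstdint>

    #include "base/bit_struct.h"

    namespace art {

    BITSTRUCT_DEFINE_START(PackedRef, /* size */ 16)
      BitStructUint</*lsb*/0, /*width*/12> index;    // values 0..4095
      BitStructUint</*lsb*/12, /*width*/3> kind;     // 8 possible kinds
      BitStructUint</*lsb*/15, /*width*/1> is_weak;  // boolean flag
    BITSTRUCT_DEFINE_END(PackedRef);

    // 12 + 3 + 1 = 16 bits, so the whole union packs into a uint16_t.
    static_assert(sizeof(PackedRef) == sizeof(uint16_t), "PackedRef packs into 16 bits");

    }  // namespace art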
diff --git a/runtime/base/bit_struct_detail.h b/runtime/base/bit_struct_detail.h
new file mode 100644
index 0000000..9f629c0
--- /dev/null
+++ b/runtime/base/bit_struct_detail.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
+#define ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
+
+#include "bit_utils.h"
+#include "globals.h"
+
+#include <type_traits>
+
+// Implementation details for bit_struct.h
+// Not intended to be used stand-alone.
+
+namespace art {
+
+template <typename T>
+static constexpr size_t BitStructSizeOf();
+
+namespace detail {
+ // Select the smallest uintX_t that will fit kBitSize bits.
+ template <size_t kBitSize>
+ struct MinimumTypeUnsignedHelper {
+ using type =
+ typename std::conditional<kBitSize == 0, void,
+ typename std::conditional<kBitSize <= 8, uint8_t,
+ typename std::conditional<kBitSize <= 16, uint16_t,
+ typename std::conditional<kBitSize <= 32, uint32_t,
+ typename std::conditional<kBitSize <= 64, uint64_t,
+ typename std::conditional<kBitSize <= BitSizeOf<uintmax_t>(), uintmax_t,
+ void>::type>::type>::type>::type>::type>::type;
+ };
+
+ // Select the smallest [u]intX_t that will fit kBitSize bits.
+ // Automatically picks intX_t or uintX_t based on the sign-ness of T.
+ template <typename T, size_t kBitSize>
+ struct MinimumTypeHelper {
+ using type_unsigned = typename MinimumTypeUnsignedHelper<kBitSize>::type;
+
+ using type =
+ typename std::conditional</* if */ std::is_signed<T>::value,
+ /* then */ typename std::make_signed<type_unsigned>::type,
+ /* else */ type_unsigned>::type;
+ };
+
+ // Ensure the minimal type storage for 'T' matches its declared BitStructSizeOf.
+ // Nominally used by the BITSTRUCT_DEFINE_END macro.
+ template <typename T>
+ static constexpr bool ValidateBitStructSize() {
+ const size_t kBitStructSizeOf = BitStructSizeOf<T>();
+ const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
+ ? kBitsPerByte
+ : RoundUpToPowerOfTwo(kBitStructSizeOf);
+
+ // Ensure no extra fields were added in between START/END.
+ const size_t kActualSize = sizeof(T) * kBitsPerByte;
+ return kExpectedSize == kActualSize;
+ }
+
+ // Denotes the beginning of a bit struct.
+ //
+ // This marker is required by the C++ standard in order to
+ // have a "common initial sequence".
+ //
+ // See C++ 9.5.1 [class.union]:
+ // If a standard-layout union contains several standard-layout structs that share a common
+ // initial sequence ... it is permitted to inspect the common initial sequence of any of
+ // standard-layout struct members.
+ template <size_t kSize>
+ struct DefineBitStructSize {
+ private:
+ typename MinimumTypeUnsignedHelper<kSize>::type _;
+ };
+} // namespace detail
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_STRUCT_DETAIL_H_
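
What the helpers compute, spelled out as static_asserts against the definitions above:

    #include <cstdint>
    #include <type_traits>

    // 10 bits does not fit in uint8_t, so the next size up is chosen.
    static_assert(std::is_same<art::detail::MinimumTypeUnsignedHelper<10>::type,
                               uint16_t>::value, "10 bits -> uint16_t");

    // A signed request of 7 bits picks the signed variant of the 8-bit type.
    static_assert(std::is_same<art::detail::MinimumTypeHelper<int, 7>::type,
                               int8_t>::value, "signed 7 bits -> int8_t");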
diff --git a/runtime/base/bit_struct_test.cc b/runtime/base/bit_struct_test.cc
new file mode 100644
index 0000000..872ada3
--- /dev/null
+++ b/runtime/base/bit_struct_test.cc
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_struct.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// A copy of detail::ValidateBitStructSize that uses EXPECT for a more
+// human-readable message.
+template <typename T>
+static constexpr bool ValidateBitStructSize(const char* name) {
+ const size_t kBitStructSizeOf = BitStructSizeOf<T>();
+ const size_t kExpectedSize = (BitStructSizeOf<T>() < kBitsPerByte)
+ ? kBitsPerByte
+ : RoundUpToPowerOfTwo(kBitStructSizeOf);
+
+ // Ensure no extra fields were added in between START/END.
+ const size_t kActualSize = sizeof(T) * kBitsPerByte;
+ EXPECT_EQ(kExpectedSize, kActualSize) << name;
+ return true;
+}
+
+#define VALIDATE_BITSTRUCT_SIZE(type) ValidateBitStructSize<type>(#type)
+
+TEST(BitStructs, MinimumType) {
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<1>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<2>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<3>::type));
+ EXPECT_EQ(1u, sizeof(typename detail::MinimumTypeUnsignedHelper<8>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<9>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<10>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<15>::type));
+ EXPECT_EQ(2u, sizeof(typename detail::MinimumTypeUnsignedHelper<16>::type));
+ EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<17>::type));
+ EXPECT_EQ(4u, sizeof(typename detail::MinimumTypeUnsignedHelper<32>::type));
+ EXPECT_EQ(8u, sizeof(typename detail::MinimumTypeUnsignedHelper<33>::type));
+ EXPECT_EQ(8u, sizeof(typename detail::MinimumTypeUnsignedHelper<64>::type));
+}
+
+template <typename T>
+size_t AsUint(const T& value) {
+ size_t uint_value = 0;
+ memcpy(&uint_value, &value, sizeof(value));
+ return uint_value;
+}
+
+struct CustomBitStruct {
+ CustomBitStruct() = default;
+ explicit CustomBitStruct(int8_t data) : data(data) {}
+
+ static constexpr size_t BitStructSizeOf() {
+ return 4;
+ }
+
+ int8_t data;
+};
+
+template <typename T>
+void ZeroInitialize(T& value) {
+ memset(&value, 0, sizeof(T));
+ // TODO: replace with value initialization
+}
+
+TEST(BitStructs, Custom) {
+ CustomBitStruct expected(0b1111);
+
+ BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f;
+ ZeroInitialize(f);
+
+ EXPECT_EQ(1u, sizeof(f));
+
+ f = CustomBitStruct(0b1111);
+
+ CustomBitStruct read_out = f;
+ EXPECT_EQ(read_out.data, 0b1111);
+
+ EXPECT_EQ(AsUint(f), 0b11110000u);
+}
+
+BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8)
+ BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a;
+ BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b;
+BITSTRUCT_DEFINE_END(TestTwoCustom);
+
+TEST(BitStructs, TwoCustom) {
+ EXPECT_EQ(sizeof(TestTwoCustom), 1u);
+
+ VALIDATE_BITSTRUCT_SIZE(TestTwoCustom);
+
+ TestTwoCustom cst;
+ ZeroInitialize(cst);
+
+ // Test the write to most-significant field doesn't clobber least-significant.
+ cst.f4_a = CustomBitStruct(0b0110);
+ cst.f4_b = CustomBitStruct(0b0101);
+
+ int8_t read_out = static_cast<CustomBitStruct>(cst.f4_a).data;
+ int8_t read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data;
+
+ EXPECT_EQ(0b0110, static_cast<int>(read_out));
+ EXPECT_EQ(0b0101, static_cast<int>(read_out_b));
+
+ EXPECT_EQ(AsUint(cst), 0b01010110u);
+
+ // Test write to least-significant field doesn't clobber most-significant.
+ cst.f4_a = CustomBitStruct(0);
+
+ read_out = static_cast<CustomBitStruct>(cst.f4_a).data;
+ read_out_b = static_cast<CustomBitStruct>(cst.f4_b).data;
+
+ EXPECT_EQ(0b0, static_cast<int>(read_out));
+ EXPECT_EQ(0b0101, static_cast<int>(read_out_b));
+
+ EXPECT_EQ(AsUint(cst), 0b01010000u);
+}
+
+TEST(BitStructs, Number) {
+ BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn;
+ ZeroInitialize(bsn);
+ EXPECT_EQ(2u, sizeof(bsn));
+
+ bsn = 0b1111;
+
+ uint32_t read_out = static_cast<uint32_t>(bsn);
+ uint32_t read_out_impl = bsn;
+
+ EXPECT_EQ(read_out, read_out_impl);
+ EXPECT_EQ(read_out, 0b1111u);
+ EXPECT_EQ(AsUint(bsn), 0b11110000u);
+}
+
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8)
+ BitStructInt</*lsb*/0, /*width*/3> i3;
+ BitStructUint</*lsb*/3, /*width*/4> u4;
+
+ BitStructUint</*lsb*/0, /*width*/7> alias_all;
+BITSTRUCT_DEFINE_END(TestBitStruct);
+
+TEST(BitStructs, Test1) {
+ {
+ // Check minimal size selection is correct.
+ BitStructInt</*lsb*/0, /*width*/3> i3;
+ BitStructUint</*lsb*/3, /*width*/4> u4;
+
+ BitStructUint</*lsb*/0, /*width*/7> alias_all;
+
+ EXPECT_EQ(1u, sizeof(i3));
+ EXPECT_EQ(1u, sizeof(u4));
+ EXPECT_EQ(1u, sizeof(alias_all));
+ }
+ TestBitStruct tst;
+ ZeroInitialize(tst);
+
+ // Check minimal size selection is correct.
+ EXPECT_EQ(1u, sizeof(TestBitStruct));
+ EXPECT_EQ(1u, sizeof(tst._));
+ EXPECT_EQ(1u, sizeof(tst.i3));
+ EXPECT_EQ(1u, sizeof(tst.u4));
+ EXPECT_EQ(1u, sizeof(tst.alias_all));
+
+ // Check operator assignment.
+ tst.i3 = -1;
+ tst.u4 = 0b1010;
+
+ // Check implicit operator conversion.
+ int8_t read_i3 = tst.i3;
+ uint8_t read_u4 = tst.u4;
+
+ // Ensure read-out values were correct.
+ EXPECT_EQ(static_cast<int8_t>(-1), read_i3);
+ EXPECT_EQ(0b1010, read_u4);
+
+ // Ensure aliasing is working.
+ EXPECT_EQ(0b1010111, static_cast<uint8_t>(tst.alias_all));
+
+ // Ensure the bit pattern is correct.
+ EXPECT_EQ(0b1010111u, AsUint(tst));
+
+ // Math operator checks
+ {
+ // In-place
+ ++tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0b1011), static_cast<uint8_t>(tst.u4));
+ --tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+
+ // Copy
+ uint8_t read_and_convert = tst.u4++;
+ EXPECT_EQ(static_cast<uint8_t>(0b1011), read_and_convert);
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+ read_and_convert = tst.u4--;
+ EXPECT_EQ(static_cast<uint8_t>(0b1001), read_and_convert);
+ EXPECT_EQ(static_cast<uint8_t>(0b1010), static_cast<uint8_t>(tst.u4));
+
+ // Check boolean operator conversion.
+ tst.u4 = 0b1010;
+ EXPECT_TRUE(static_cast<bool>(tst.u4));
+ bool succ = tst.u4 ? true : false;
+ EXPECT_TRUE(succ);
+
+ tst.u4 = 0;
+ EXPECT_FALSE(static_cast<bool>(tst.u4));
+
+/*
+ // Disabled: Overflow is caught by the BitFieldInsert DCHECKs.
+ // Check overflow for uint.
+ tst.u4 = 0b1111;
+ ++tst.u4;
+ EXPECT_EQ(static_cast<uint8_t>(0), static_cast<uint8_t>(tst.u4));
+*/
+ }
+}
+
+BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32)
+ BitStructUint</*lsb*/0, /*width*/3> u3;
+ BitStructUint</*lsb*/3, /*width*/10> u10;
+ BitStructUint</*lsb*/13, /*width*/19> u19;
+
+ BitStructUint</*lsb*/0, /*width*/32> alias_all;
+BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
+
+// static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
+
+TEST(BitStructs, Mixed) {
+ EXPECT_EQ(4u, sizeof(MixedSizeBitStruct));
+
+ MixedSizeBitStruct tst;
+ ZeroInitialize(tst);
+
+ // Check operator assignment.
+ tst.u3 = 0b111u;
+ tst.u10 = 0b1111010100u;
+ tst.u19 = 0b1010101010101010101u;
+
+ // Check implicit operator conversion.
+ uint8_t read_u3 = tst.u3;
+ uint16_t read_u10 = tst.u10;
+ uint32_t read_u19 = tst.u19;
+
+ // Ensure read-out values were correct.
+ EXPECT_EQ(0b111u, read_u3);
+ EXPECT_EQ(0b1111010100u, read_u10);
+ EXPECT_EQ(0b1010101010101010101u, read_u19);
+
+ uint32_t read_all = tst.alias_all;
+
+ // Ensure aliasing is working.
+ EXPECT_EQ(0b10101010101010101011111010100111u, read_all);
+
+ // Ensure the bit pattern is correct.
+ EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
+}
+
+} // namespace art
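For orientation, the tests above exercise bitstructs where each field owns a disjoint bit range of one backing word. A minimal sketch of typical declaration and use follows; PackedFlags and its fields are hypothetical, and the zeroing mirrors the tests' memset-based ZeroInitialize:

    // Hypothetical example; relies on the same BITSTRUCT_DEFINE_* macros and
    // BitStructInt/BitStructUint templates exercised by the tests above.
    BITSTRUCT_DEFINE_START(PackedFlags, /* size */ 8)
      BitStructUint</*lsb*/0, /*width*/1> is_native;
      BitStructInt</*lsb*/1, /*width*/7> priority;
    BITSTRUCT_DEFINE_END(PackedFlags);

    void PackedFlagsExample() {
      PackedFlags flags;
      memset(&flags, 0, sizeof(flags));  // Same zeroing the tests use.
      flags.is_native = 1u;              // Writes only bit 0.
      flags.priority = -3;               // Writes only bits 1..7.
      int8_t p = flags.priority;         // Reads back sign-extended: -3.
    }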
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 87dac02..da3c704 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -371,6 +371,128 @@
return opnd;
}
+// Create a mask for the least significant "bits".
+// The returned value is always unsigned to prevent undefined behavior for bitwise ops.
+//
+// Given 'bits',
+// Returns:
+// <--- bits --->
+// +-----------------+------------+
+// | 0 ............0 | 1.....1 |
+// +-----------------+------------+
+// msb lsb
+template <typename T = size_t>
+inline static constexpr std::make_unsigned_t<T> MaskLeastSignificant(size_t bits) {
+ DCHECK_GE(BitSizeOf<T>(), bits) << "Bits out of range for type T";
+ using unsigned_T = std::make_unsigned_t<T>;
+ if (bits >= BitSizeOf<T>()) {
+ return std::numeric_limits<unsigned_T>::max();
+ } else {
+ return static_cast<unsigned_T>((1 << bits) - 1);
+ }
+}
+
+// Clears the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'.
+// (Equivalent of ARM BFC instruction).
+//
+// Given:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | bitfield | XYZ... |
+// +--------+------------+--------+
+// lsb 0
+// Returns:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | 0........0 | XYZ... |
+// +--------+------------+--------+
+// lsb 0
+template <typename T>
+inline static constexpr T BitFieldClear(T value, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ const auto val = static_cast<std::make_unsigned_t<T>>(value);
+ const auto mask = MaskLeastSignificant<T>(width);
+
+ return static_cast<T>(val & ~(mask << lsb));
+}
+
+// Inserts the contents of 'data' into bitfield of 'value' starting
+// at the least significant bit "lsb" with a bitwidth of 'width'.
+// Note: data must be within range of [MinInt(width), MaxInt(width)].
+// (Equivalent of ARM BFI instruction).
+//
+// Given (data):
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | bitfield | XYZ... |
+// +--------+------------+--------+
+// lsb 0
+// Returns:
+// <-- width -->
+// +--------+------------+--------+
+// | ABC... | 0...data | XYZ... |
+// +--------+------------+--------+
+// lsb 0
+template <typename T, typename T2>
+inline static constexpr T BitFieldInsert(T value, T2 data, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ if (width != 0u) {
+ DCHECK_GE(MaxInt<T2>(width), data) << "Data out of range [too large] for bitwidth";
+ DCHECK_LE(MinInt<T2>(width), data) << "Data out of range [too small] for bitwidth";
+ } else {
+ DCHECK_EQ(static_cast<T2>(0), data) << "Data out of range [nonzero] for bitwidth 0";
+ }
+ const auto data_mask = MaskLeastSignificant<T2>(width);
+ const auto value_cleared = BitFieldClear(value, lsb, width);
+
+ return static_cast<T>(value_cleared | ((data & data_mask) << lsb));
+}
+
+// Extracts the bitfield starting at the least significant bit "lsb" with a bitwidth of 'width'.
+// Signed types are sign-extended during extraction. (Equivalent of ARM UBFX/SBFX instruction).
+//
+// Given:
+// <-- width -->
+// +--------+-------------+-------+
+// | | bitfield | |
+// +--------+-------------+-------+
+// lsb 0
+// (Unsigned) Returns:
+// <-- width -->
+// +----------------+-------------+
+// | 0... 0 | bitfield |
+// +----------------+-------------+
+// 0
+// (Signed) Returns:
+// <-- width -->
+// +----------------+-------------+
+// | S... S | bitfield |
+// +----------------+-------------+
+// 0
+// where S is the highest bit in 'bitfield'.
+template <typename T>
+inline static constexpr T BitFieldExtract(T value, size_t lsb, size_t width) {
+ DCHECK_GE(BitSizeOf(value), lsb + width) << "Bit field out of range for value";
+ const auto val = static_cast<std::make_unsigned_t<T>>(value);
+
+ const T bitfield_unsigned =
+ static_cast<T>((val >> lsb) & MaskLeastSignificant<T>(width));
+ if (std::is_signed<T>::value) {
+ // Perform sign extension
+ if (width == 0) { // Avoid underflow.
+ return static_cast<T>(0);
+ } else if (bitfield_unsigned & (1 << (width - 1))) { // Detect if sign bit was set.
+ // MSB <width> LSB
+ // 0b11111...100...000000
+ const auto ones_negmask = ~MaskLeastSignificant<T>(width);
+ return static_cast<T>(bitfield_unsigned | ones_negmask);
+ }
+ }
+ // Skip sign extension.
+ return bitfield_unsigned;
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_BIT_UTILS_H_
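A worked example for the four helpers added above (values computed by hand; the variable names are illustrative only):

    uint32_t v = 0b11110110u;
    uint32_t m = MaskLeastSignificant<uint32_t>(3);        // 0b111
    uint32_t c = BitFieldClear(v, /*lsb*/2, /*width*/3);   // 0b11100010
    uint32_t r = BitFieldInsert(c, /*data*/0b101u, /*lsb*/2, /*width*/3);  // 0b11110110 again
    int32_t  e = BitFieldExtract(static_cast<int32_t>(0xF0), /*lsb*/4, /*width*/4);
    // e == -1: the extracted field is 0b1111 and T is signed, so it sign-extends.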
diff --git a/runtime/base/bit_utils_test.cc b/runtime/base/bit_utils_test.cc
index c96c6dc..0276d8d 100644
--- a/runtime/base/bit_utils_test.cc
+++ b/runtime/base/bit_utils_test.cc
@@ -345,6 +345,97 @@
"TestIsAbsoluteUint64#27");
static_assert(!IsAbsoluteUint<32, int64_t>(kUint32MaxPlus1), "TestIsAbsoluteUint64#28");
+static_assert(MaskLeastSignificant(0) == 0b0, "TestMaskLeastSignificant#1");
+static_assert(MaskLeastSignificant(1) == 0b1, "TestMaskLeastSignificant#2");
+static_assert(MaskLeastSignificant(2) == 0b11, "TestMaskLeastSignificant#3");
+static_assert(MaskLeastSignificant<uint8_t>(8) == 0xFF, "TestMaskLeastSignificant#4");
+static_assert(MaskLeastSignificant<int8_t>(8) == 0xFF, "TestMaskLeastSignificant#5");
+
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
+static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+ "TestBitFieldClear#2");
+static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+ "TestBitFieldClear#3");
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4");
+static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5");
+static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6");
+static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7");
+
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1");
+static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldExtract#2");
+static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32)
+ == std::numeric_limits<int32_t>::max(),
+ "TestBitFieldExtract#3");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011,
+ "TestBitFieldExtract#4");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#5");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#6");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#7");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111,
+ "TestBitFieldExtract#8");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000,
+ "TestBitFieldExtract#9");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001,
+ "TestBitFieldExtract#10");
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011,
+ "TestBitFieldExtract#11");
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12");
+static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13");
+static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14");
+static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15");
+static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16");
+static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17");
+static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18");
+static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19");
+static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4)
+ == 0b00001010,
+ "TestBitFieldExtract#20");
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4)
+ == static_cast<int8_t>(0b11111010),
+ "TestBitFieldExtract#21");
+
+static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF,
+ "TestBitFieldInsert#1");
+static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#2");
+static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#3");
+static_assert(BitFieldInsert(0u,
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#4");
+static_assert(BitFieldInsert(-(-0),
+ /*data*/std::numeric_limits<uint32_t>::max(),
+ /*lsb*/0,
+ /*width*/32)
+ == std::numeric_limits<uint32_t>::max(),
+ "TestBitFieldInsert#5");
+static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011,
+ "TestBitFieldInsert#6");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111,
+ "TestBitFieldInsert#7");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110,
+ "TestBitFieldInsert#8");
+static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100,
+ "TestBitFieldInsert#9");
+static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100,
+ "TestBitFieldInsert#10");
+
template <typename Container>
void CheckElements(const std::initializer_list<uint32_t>& expected, const Container& elements) {
auto expected_it = expected.begin();
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index c472a9e..bc25b36 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -22,6 +22,7 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <type_traits>
#include <utility>
#include "bit_utils.h"
@@ -385,18 +386,20 @@
}
// Insert an element, allows duplicates.
- void Insert(const T& element) {
- InsertWithHash(element, hashfn_(element));
+ template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
+ void Insert(U&& element) {
+ InsertWithHash(std::forward<U>(element), hashfn_(element));
}
- void InsertWithHash(const T& element, size_t hash) {
+ template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
+ void InsertWithHash(U&& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
Expand();
DCHECK_LT(num_elements_, elements_until_expand_);
}
const size_t index = FirstAvailableSlot(IndexForHash(hash));
- data_[index] = element;
+ data_[index] = std::forward<U>(element);
++num_elements_;
}
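The Insert/InsertWithHash change above switches from const T& to a forwarding reference, so rvalue arguments are moved into the backing store rather than copied. The same pattern in isolation, on a hypothetical container (not ART code):

    #include <type_traits>
    #include <utility>
    #include <vector>

    template <typename T>
    class Bag {
     public:
      template <typename U,
                typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
      void Insert(U&& element) {
        // std::forward keeps the value category: moved if U is an rvalue, copied otherwise.
        data_.push_back(std::forward<U>(element));
      }
     private:
      std::vector<T> data_;
    };
    // Bag<std::string> b; b.Insert(std::string("heap buffer"));  // moved, not copied.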
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 1a0eb5e..f156f52 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -145,6 +145,10 @@
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
+ ArenaStack* GetArenaStack() const {
+ return arena_stack_;
+ }
+
void Reset();
void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 4a6c907..fccaaea 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -52,17 +52,40 @@
using ScopedArenaVector = dchecked_vector<T, ScopedArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
+using ScopedArenaPriorityQueue = std::priority_queue<T, ScopedArenaVector<T>, Comparator>;
+
+template <typename T>
+using ScopedArenaStdStack = std::stack<T, ScopedArenaDeque<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
using ScopedArenaSet = std::set<T, Comparator, ScopedArenaAllocatorAdapter<T>>;
template <typename K, typename V, typename Comparator = std::less<K>>
using ScopedArenaSafeMap =
SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
+template <typename T,
+ typename EmptyFn = DefaultEmptyFn<T>,
+ typename HashFn = std::hash<T>,
+ typename Pred = std::equal_to<T>>
+using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
+
+template <typename Key,
+ typename Value,
+ typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
+ typename HashFn = std::hash<Key>,
+ typename Pred = std::equal_to<Key>>
+using ScopedArenaHashMap = HashMap<Key,
+ Value,
+ EmptyFn,
+ HashFn,
+ Pred,
+ ScopedArenaAllocatorAdapter<std::pair<Key, Value>>>;
+
template <typename K, typename V, class Hash = std::hash<K>, class KeyEqual = std::equal_to<K>>
using ScopedArenaUnorderedMap =
std::unordered_map<K, V, Hash, KeyEqual, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
-
// Implementation details below.
template <>
@@ -79,12 +102,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
@@ -122,12 +145,12 @@
typedef ScopedArenaAllocatorAdapter<U> other;
};
- explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* allocator,
ArenaAllocKind kind = kArenaAllocSTL)
- : DebugStackReference(arena_allocator),
- DebugStackIndirectTopRef(arena_allocator),
+ : DebugStackReference(allocator),
+ DebugStackIndirectTopRef(allocator),
ArenaAllocatorAdapterKind(kind),
- arena_stack_(arena_allocator->arena_stack_) {
+ arena_stack_(allocator->arena_stack_) {
}
template <typename U>
ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 6d1de00..792c581 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -163,24 +163,36 @@
moveUp(GuardState::kClosed, nullptr);
}
+#if defined(__linux__)
+ // close always succeeds on linux, even if failure is reported.
+ UNUSED(result);
+#else
if (result == -1) {
return -errno;
- } else {
- fd_ = -1;
- file_path_ = "";
- return 0;
}
+#endif
+
+ fd_ = -1;
+ file_path_ = "";
+ return 0;
}
int FdFile::Flush() {
DCHECK(!read_only_mode_);
+
#ifdef __linux__
int rc = TEMP_FAILURE_RETRY(fdatasync(fd_));
#else
int rc = TEMP_FAILURE_RETRY(fsync(fd_));
#endif
+
moveUp(GuardState::kFlushed, "Flushing closed file.");
- return (rc == -1) ? -errno : rc;
+ if (rc == 0) {
+ return 0;
+ }
+
+ // Don't report failure if we just tried to flush a pipe or socket.
+ return errno == EINVAL ? 0 : -errno;
}
int64_t FdFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
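The EINVAL special case above exists because fdatasync(2)/fsync(2) fail with errno == EINVAL when the descriptor refers to a pipe, FIFO, or socket, which is not a real flush failure. Reduced to a standalone sketch (glibc's TEMP_FAILURE_RETRY assumed available):

    #include <cerrno>
    #include <unistd.h>

    int FlushFd(int fd) {
      int rc = TEMP_FAILURE_RETRY(fdatasync(fd));
      if (rc == 0) {
        return 0;
      }
      // A pipe/socket cannot be synced; treat EINVAL as success, as Flush() does.
      return errno == EINVAL ? 0 : -errno;
    }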
diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h
index 91b08bc..3fb70f6 100644
--- a/runtime/base/unix_file/fd_file.h
+++ b/runtime/base/unix_file/fd_file.h
@@ -94,6 +94,7 @@
int SetLength(int64_t new_length) OVERRIDE WARN_UNUSED;
int64_t GetLength() const OVERRIDE;
int64_t Write(const char* buf, int64_t byte_count, int64_t offset) OVERRIDE WARN_UNUSED;
+
int Flush() OVERRIDE WARN_UNUSED;
// Short for SetLength(0); Flush(); Close();
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 8b1a115..042fbc9 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -274,4 +274,15 @@
EXPECT_EQ(reset_compare(tmp, tmp6), 0);
}
+TEST_F(FdFileTest, PipeFlush) {
+ int pipefd[2];
+ ASSERT_EQ(0, pipe2(pipefd, O_CLOEXEC));
+
+ FdFile file(pipefd[1], true);
+ ASSERT_TRUE(file.WriteFully("foo", 3));
+ ASSERT_EQ(0, file.Flush());
+ ASSERT_EQ(0, file.FlushCloseOrErase());
+ close(pipefd[0]);
+}
+
} // namespace unix_file
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index d87df87..71a1018 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -237,6 +237,14 @@
return (ptr == nullptr) ? key.CreateDefaultValue() : *ptr;
}
+ template <typename T, typename U>
+ void AssignIfExists(const TKey<T>& key, U* out) {
+ DCHECK(out != nullptr);
+ if (Exists(key)) {
+ *out = std::move(*Get(key));
+ }
+ }
+
private:
// TODO: move to detail, or make it more generic like a ScopeGuard(function)
template <typename TValue>
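A hypothetical call site for the new AssignIfExists: the out-parameter keeps its prior value when the key is absent, so defaults survive untouched.

    // Names are illustrative; 'opts' is a VariantMap and kHeapSize a TKey<unsigned>.
    unsigned heap_size = 64 * MB;  // Default stays if the key was never set.
    opts.AssignIfExists(kHeapSize, &heap_size);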
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 772f042..0cc2622 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1768,7 +1768,11 @@
// We cannot do that for app image even after the fixup as some interned
// String references may actually end up pointing to moveable Strings.
uint8_t* const_section_begin = space->Begin() + header.GetBootImageConstantTablesOffset();
- mprotect(const_section_begin, header.GetBootImageConstantTablesSize(), PROT_READ);
+ CheckedCall(mprotect,
+ "protect constant tables",
+ const_section_begin,
+ header.GetBootImageConstantTablesSize(),
+ PROT_READ);
}
ClassTable* class_table = nullptr;
@@ -2862,6 +2866,11 @@
return true;
}
+ if (Thread::Current()->IsAsyncExceptionPending()) {
+ // Force use of interpreter to handle async exceptions.
+ return true;
+ }
+
if (runtime->IsJavaDebuggable()) {
// For simplicity, we ignore precompiled code and go to the interpreter
// assuming we don't already have jitted code.
@@ -8033,6 +8042,15 @@
return type.Get();
}
+mirror::MethodType* ClassLinker::ResolveMethodType(uint32_t proto_idx, ArtMethod* referrer) {
+ Thread* const self = Thread::Current();
+ StackHandleScope<2> hs(self);
+ const DexFile* dex_file = referrer->GetDexFile();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
+ return ResolveMethodType(*dex_file, proto_idx, dex_cache, class_loader);
+}
+
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
Thread* self,
const DexFile::MethodHandleItem& method_handle,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f97433c..e436b99 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -371,6 +371,9 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
+ mirror::MethodType* ResolveMethodType(uint32_t proto_idx, ArtMethod* referrer)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Resolve a method handle with a given ID from the DexFile. The
// result is not cached in the DexCache as the instance will only be
// used once in most circumstances.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index be157a3..2e776b0 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -66,8 +66,10 @@
// Dex version 036 skipped because of an old dalvik bug on some versions of android where dex
// files with that version number would erroneously be accepted and run.
{'0', '3', '7', '\0'},
- // Dex version 038: Android "O" and beyond.
- {'0', '3', '8', '\0'}
+ // Dex version 038: Android "O".
+ {'0', '3', '8', '\0'},
+ // Dex version 039: Beyond Android "O".
+ {'0', '3', '9', '\0'},
};
uint32_t DexFile::CalculateChecksum() const {
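With the 039 entry added, version acceptance presumably remains a linear scan over kDexMagicVersions; a sketch under that assumption (the actual check lives in DexFile/DexFileVerifier code not shown in this patch):

    #include <cstring>

    bool IsKnownDexVersion(const uint8_t* version) {
      for (size_t i = 0; i < DexFile::kNumDexVersions; ++i) {
        if (memcmp(version, DexFile::kDexMagicVersions[i], DexFile::kDexVersionLen) == 0) {
          return true;
        }
      }
      return false;
    }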
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index ac91d52..516b379 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -47,7 +47,7 @@
static const uint32_t kClassDefinitionOrderEnforcedVersion = 37;
static const uint8_t kDexMagic[];
- static constexpr size_t kNumDexVersions = 3;
+ static constexpr size_t kNumDexVersions = 4;
static constexpr size_t kDexVersionLen = 4;
static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
@@ -496,11 +496,7 @@
// classes*.dex path.
static std::string GetBaseLocation(const char* location) {
const char* pos = strrchr(location, kMultiDexSeparator);
- if (pos == nullptr) {
- return location;
- } else {
- return std::string(location, pos - location);
- }
+ return (pos == nullptr) ? location : std::string(location, pos - location);
}
static std::string GetBaseLocation(const std::string& location) {
@@ -512,11 +508,7 @@
// The kMultiDexSeparator is included in the returned suffix.
static std::string GetMultiDexSuffix(const std::string& location) {
size_t pos = location.rfind(kMultiDexSeparator);
- if (pos == std::string::npos) {
- return "";
- } else {
- return location.substr(pos);
- }
+ return (pos == std::string::npos) ? std::string() : location.substr(pos);
}
std::string GetBaseLocation() const {
@@ -733,11 +725,10 @@
const TypeList* GetInterfacesList(const ClassDef& class_def) const {
if (class_def.interfaces_off_ == 0) {
- return nullptr;
- } else {
- const uint8_t* addr = begin_ + class_def.interfaces_off_;
- return reinterpret_cast<const TypeList*>(addr);
+ return nullptr;
}
+ const uint8_t* addr = begin_ + class_def.interfaces_off_;
+ return reinterpret_cast<const TypeList*>(addr);
}
uint32_t NumMethodHandles() const {
@@ -760,11 +751,7 @@
// Returns a pointer to the raw memory mapped class_data_item
const uint8_t* GetClassData(const ClassDef& class_def) const {
- if (class_def.class_data_off_ == 0) {
- return nullptr;
- } else {
- return begin_ + class_def.class_data_off_;
- }
+ return (class_def.class_data_off_ == 0) ? nullptr : begin_ + class_def.class_data_off_;
}
//
@@ -772,10 +759,9 @@
DCHECK_LT(code_off, size_) << "Code item offset larger then maximum allowed offset";
if (code_off == 0) {
return nullptr; // native or abstract method
- } else {
- const uint8_t* addr = begin_ + code_off;
- return reinterpret_cast<const CodeItem*>(addr);
}
+ const uint8_t* addr = begin_ + code_off;
+ return reinterpret_cast<const CodeItem*>(addr);
}
const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
@@ -820,20 +806,13 @@
const char* GetShorty(uint32_t proto_idx) const;
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
- if (proto_id.parameters_off_ == 0) {
- return nullptr;
- } else {
- const uint8_t* addr = begin_ + proto_id.parameters_off_;
- return reinterpret_cast<const TypeList*>(addr);
- }
+ return (proto_id.parameters_off_ == 0)
+ ? nullptr
+ : reinterpret_cast<const TypeList*>(begin_ + proto_id.parameters_off_);
}
const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
- if (class_def.static_values_off_ == 0) {
- return 0;
- } else {
- return begin_ + class_def.static_values_off_;
- }
+ return (class_def.static_values_off_ == 0) ? 0 : begin_ + class_def.static_values_off_;
}
const uint8_t* GetCallSiteEncodedValuesArray(const CallSiteIdItem& call_site_id) const {
@@ -860,27 +839,18 @@
// Check that the offset is in bounds.
// Note that although the specification says that 0 should be used if there
// is no debug information, some applications incorrectly use 0xFFFFFFFF.
- if (code_item->debug_info_off_ == 0 || code_item->debug_info_off_ >= size_) {
- return nullptr;
- } else {
- return begin_ + code_item->debug_info_off_;
- }
+ const uint32_t debug_info_off = code_item->debug_info_off_;
+ return (debug_info_off == 0 || debug_info_off >= size_) ? nullptr : begin_ + debug_info_off;
}
struct PositionInfo {
- PositionInfo()
- : address_(0),
- line_(0),
- source_file_(nullptr),
- prologue_end_(false),
- epilogue_begin_(false) {
- }
+ PositionInfo() = default;
- uint32_t address_; // In 16-bit code units.
- uint32_t line_; // Source code line number starting at 1.
- const char* source_file_; // nullptr if the file from ClassDef still applies.
- bool prologue_end_;
- bool epilogue_begin_;
+ uint32_t address_ = 0; // In 16-bit code units.
+ uint32_t line_ = 0; // Source code line number starting at 1.
+ const char* source_file_ = nullptr; // nullptr if the file from ClassDef still applies.
+ bool prologue_end_ = false;
+ bool epilogue_begin_ = false;
};
// Callback for "new position table entry".
@@ -888,23 +858,15 @@
typedef bool (*DexDebugNewPositionCb)(void* context, const PositionInfo& entry);
struct LocalInfo {
- LocalInfo()
- : name_(nullptr),
- descriptor_(nullptr),
- signature_(nullptr),
- start_address_(0),
- end_address_(0),
- reg_(0),
- is_live_(false) {
- }
+ LocalInfo() = default;
- const char* name_; // E.g., list. It can be nullptr if unknown.
- const char* descriptor_; // E.g., Ljava/util/LinkedList;
- const char* signature_; // E.g., java.util.LinkedList<java.lang.Integer>
- uint32_t start_address_; // PC location where the local is first defined.
- uint32_t end_address_; // PC location where the local is no longer defined.
- uint16_t reg_; // Dex register which stores the values.
- bool is_live_; // Is the local defined and live.
+ const char* name_ = nullptr; // E.g., list. It can be nullptr if unknown.
+ const char* descriptor_ = nullptr; // E.g., Ljava/util/LinkedList;
+ const char* signature_ = nullptr; // E.g., java.util.LinkedList<java.lang.Integer>
+ uint32_t start_address_ = 0; // PC location where the local is first defined.
+ uint32_t end_address_ = 0; // PC location where the local is no longer defined.
+ uint16_t reg_ = 0; // Dex register which stores the values.
+ bool is_live_ = false; // Is the local defined and live.
};
// Callback for "new locals table entry".
@@ -913,98 +875,82 @@
static bool LineNumForPcCb(void* context, const PositionInfo& entry);
const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
- if (class_def.annotations_off_ == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_);
- }
+ return (class_def.annotations_off_ == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_);
}
const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const {
- if (anno_dir->class_annotations_off_ == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_);
- }
+ return (anno_dir->class_annotations_off_ == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_);
}
const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const {
- if (anno_dir->fields_size_ == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
- }
+ return (anno_dir->fields_size_ == 0)
+ ? nullptr
+ : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
}
const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir)
const {
if (anno_dir->methods_size_ == 0) {
return nullptr;
- } else {
- // Skip past the header and field annotations.
- const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
- addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
- return reinterpret_cast<const MethodAnnotationsItem*>(addr);
}
+ // Skip past the header and field annotations.
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
+ addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
+ return reinterpret_cast<const MethodAnnotationsItem*>(addr);
}
const ParameterAnnotationsItem* GetParameterAnnotations(const AnnotationsDirectoryItem* anno_dir)
const {
if (anno_dir->parameters_size_ == 0) {
return nullptr;
- } else {
- // Skip past the header, field annotations, and method annotations.
- const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
- addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
- addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem);
- return reinterpret_cast<const ParameterAnnotationsItem*>(addr);
}
+ // Skip past the header, field annotations, and method annotations.
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(&anno_dir[1]);
+ addr += anno_dir->fields_size_ * sizeof(FieldAnnotationsItem);
+ addr += anno_dir->methods_size_ * sizeof(MethodAnnotationsItem);
+ return reinterpret_cast<const ParameterAnnotationsItem*>(addr);
}
const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const {
uint32_t offset = anno_item.annotations_off_;
- if (offset == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
- }
+ return (offset == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
}
const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item)
const {
uint32_t offset = anno_item.annotations_off_;
- if (offset == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
- }
+ return (offset == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
}
const AnnotationSetRefList* GetParameterAnnotationSetRefList(
const ParameterAnnotationsItem* anno_item) const {
uint32_t offset = anno_item->annotations_off_;
- if (offset == 0) {
- return nullptr;
- }
- return reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
+ return (offset == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
}
const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
DCHECK_LE(index, set_item->size_);
uint32_t offset = set_item->entries_[index];
- if (offset == 0) {
- return nullptr;
- } else {
- return reinterpret_cast<const AnnotationItem*>(begin_ + offset);
- }
+ return (offset == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationItem*>(begin_ + offset);
}
const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
uint32_t offset = anno_item->annotations_off_;
- if (offset == 0) {
- return nullptr;
- }
- return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ return (offset == 0)
+ ? nullptr
+ : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
}
// Debug info opcodes and constants
@@ -1252,7 +1198,7 @@
class DexFileParameterIterator {
public:
DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
- : dex_file_(dex_file), size_(0), pos_(0) {
+ : dex_file_(dex_file) {
type_list_ = dex_file_.GetProtoParameters(proto_id);
if (type_list_ != nullptr) {
size_ = type_list_->Size();
@@ -1269,9 +1215,9 @@
}
private:
const DexFile& dex_file_;
- const DexFile::TypeList* type_list_;
- uint32_t size_;
- uint32_t pos_;
+ const DexFile::TypeList* type_list_ = nullptr;
+ uint32_t size_ = 0;
+ uint32_t pos_ = 0;
DISALLOW_IMPLICIT_CONSTRUCTORS(DexFileParameterIterator);
};
@@ -1298,13 +1244,12 @@
Signature(const DexFile* dex, const DexFile::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) {
}
- Signature() : dex_file_(nullptr), proto_id_(nullptr) {
- }
+ Signature() = default;
friend class DexFile;
- const DexFile* const dex_file_;
- const DexFile::ProtoId* const proto_id_;
+ const DexFile* const dex_file_ = nullptr;
+ const DexFile::ProtoId* const proto_id_ = nullptr;
};
std::ostream& operator<<(std::ostream& os, const Signature& sig);
@@ -1583,44 +1528,44 @@
std::ostream& operator<<(std::ostream& os, const CallSiteArrayValueIterator::ValueType& code);
class CatchHandlerIterator {
- public:
- CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address);
+ public:
+ CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address);
- CatchHandlerIterator(const DexFile::CodeItem& code_item,
- const DexFile::TryItem& try_item);
+ CatchHandlerIterator(const DexFile::CodeItem& code_item,
+ const DexFile::TryItem& try_item);
- explicit CatchHandlerIterator(const uint8_t* handler_data) {
- Init(handler_data);
- }
+ explicit CatchHandlerIterator(const uint8_t* handler_data) {
+ Init(handler_data);
+ }
- dex::TypeIndex GetHandlerTypeIndex() const {
- return handler_.type_idx_;
- }
- uint32_t GetHandlerAddress() const {
- return handler_.address_;
- }
- void Next();
- bool HasNext() const {
- return remaining_count_ != -1 || catch_all_;
- }
- // End of this set of catch blocks, convenience method to locate next set of catch blocks
- const uint8_t* EndDataPointer() const {
- CHECK(!HasNext());
- return current_data_;
- }
+ dex::TypeIndex GetHandlerTypeIndex() const {
+ return handler_.type_idx_;
+ }
+ uint32_t GetHandlerAddress() const {
+ return handler_.address_;
+ }
+ void Next();
+ bool HasNext() const {
+ return remaining_count_ != -1 || catch_all_;
+ }
+ // End of this set of catch blocks, convenience method to locate next set of catch blocks
+ const uint8_t* EndDataPointer() const {
+ CHECK(!HasNext());
+ return current_data_;
+ }
- private:
- void Init(const DexFile::CodeItem& code_item, int32_t offset);
- void Init(const uint8_t* handler_data);
+ private:
+ void Init(const DexFile::CodeItem& code_item, int32_t offset);
+ void Init(const uint8_t* handler_data);
- struct CatchHandlerItem {
- dex::TypeIndex type_idx_; // type index of the caught exception type
- uint32_t address_; // handler address
- } handler_;
- const uint8_t* current_data_; // the current handler in dex file.
- int32_t remaining_count_; // number of handlers not read.
- bool catch_all_; // is there a handler that will catch all exceptions in case
- // that all typed handler does not match.
+ struct CatchHandlerItem {
+ dex::TypeIndex type_idx_; // type index of the caught exception type
+ uint32_t address_; // handler address
+ } handler_;
+ const uint8_t* current_data_; // the current handler in dex file.
+ int32_t remaining_count_; // number of handlers not read.
+ bool catch_all_; // is there a handler that will catch all exceptions in case
+ // none of the typed handlers match.
};
} // namespace art
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index a7bf59e..67cd428 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -75,7 +75,7 @@
"AAACAAAAQAEAAAEgAAACAAAAVAEAAAYgAAACAAAAiAEAAAEQAAABAAAAqAEAAAIgAAAPAAAArgEA"
"AAMgAAACAAAAiAIAAAQgAAADAAAAlAIAAAAgAAACAAAAqwIAAAAQAAABAAAAxAIAAA==";
-// kRawDex38 and 39 are dex'ed versions of the following Java source :
+// kRawDex{38,39,40,41} are dex'ed versions of the following Java source:
//
// public class Main {
// public static void main(String[] foo) {
@@ -108,6 +108,30 @@
"uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
"AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+static const char kRawDex40[] =
+ "ZGV4CjA0MAC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
+static const char kRawDex41[] =
+ "ZGV4CjA0MQC4OovJlJ1089ikzK6asMf/f8qp3Kve5VsgAgAAcAAAAHhWNBIAAAAAAAAAAIwBAAAI"
+ "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAAwAQAA8AAAACIB"
+ "AAAqAQAAMgEAAEYBAABRAQAAVAEAAFgBAABtAQAAAQAAAAIAAAAEAAAABgAAAAQAAAACAAAAAAAA"
+ "AAUAAAACAAAAHAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAADAAAA"
+ "AAAAAH4BAAAAAAAAAQABAAEAAABzAQAABAAAAHAQAgAAAA4AAQABAAAAAAB4AQAAAQAAAA4AAAAB"
+ "AAAAAwAGPGluaXQ+AAZMTWFpbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwAJTWFpbi5qYXZhAAFWAAJW"
+ "TAATW0xqYXZhL2xhbmcvU3RyaW5nOwAEbWFpbgABAAcOAAMBAAcOAAAAAgAAgYAE8AEBCYgCDAAA"
+ "AAAAAAABAAAAAAAAAAEAAAAIAAAAcAAAAAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAA"
+ "uAAAAAYAAAABAAAA0AAAAAEgAAACAAAA8AAAAAEQAAABAAAAHAEAAAIgAAAIAAAAIgEAAAMgAAAC"
+ "AAAAcwEAAAAgAAABAAAAfgEAAAAQAAABAAAAjAEAAA==";
+
static const char kRawDexZeroLength[] =
"UEsDBAoAAAAAAOhxAkkAAAAAAAAAAAAAAAALABwAY2xhc3Nlcy5kZXhVVAkAA2QNoVdnDaFXdXgL"
"AAEE5AMBAASIEwAAUEsBAh4DCgAAAAAA6HECSQAAAAAAAAAAAAAAAAsAGAAAAAAAAAAAAKCBAAAA"
@@ -323,10 +347,31 @@
EXPECT_EQ(38u, header.GetVersion());
}
-TEST_F(DexFileTest, Version39Rejected) {
+TEST_F(DexFileTest, Version39Accepted) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex39, tmp.GetFilename().c_str()));
+ ASSERT_TRUE(raw.get() != nullptr);
+
+ const DexFile::Header& header = raw->GetHeader();
+ EXPECT_EQ(39u, header.GetVersion());
+}
+
+TEST_F(DexFileTest, Version40Rejected) {
ScratchFile tmp;
const char* location = tmp.GetFilename().c_str();
- DecodeAndWriteDexFile(kRawDex39, location);
+ DecodeAndWriteDexFile(kRawDex40, location);
+
+ ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ASSERT_FALSE(DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files));
+}
+
+TEST_F(DexFileTest, Version41Rejected) {
+ ScratchFile tmp;
+ const char* location = tmp.GetFilename().c_str();
+ DecodeAndWriteDexFile(kRawDex41, location);
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 9a17576..2f28dff 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -135,6 +135,8 @@
kIndexVtableOffset, // vtable offset (for static linked methods)
kIndexMethodAndProtoRef, // method and a proto reference index (for invoke-polymorphic)
kIndexCallSiteRef, // call site reference index
+ kIndexMethodHandleRef, // constant method handle reference index
+ kIndexProtoRef, // prototype reference index
};
enum Flags : uint8_t {
@@ -195,7 +197,9 @@
kVerifyRuntimeOnly = 0x0200000,
kVerifyError = 0x0400000,
kVerifyRegHPrototype = 0x0800000,
- kVerifyRegBCallSite = 0x1000000
+ kVerifyRegBCallSite = 0x1000000,
+ kVerifyRegBMethodHandle = 0x2000000,
+ kVerifyRegBPrototype = 0x4000000,
};
// Collect the enums in a struct for better locality.
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index d0a4ae5..ef83bdc 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -273,8 +273,8 @@
V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, 0, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kVerifyRegHPrototype) \
V(0xFC, INVOKE_CUSTOM, "invoke-custom", k35c, kIndexCallSiteRef, kContinue | kThrow, 0, kVerifyRegBCallSite | kVerifyVarArg) \
V(0xFD, INVOKE_CUSTOM_RANGE, "invoke-custom/range", k3rc, kIndexCallSiteRef, kContinue | kThrow, 0, kVerifyRegBCallSite | kVerifyVarArgRange) \
- V(0xFE, UNUSED_FE, "unused-fe", k10x, kIndexUnknown, 0, 0, kVerifyError) \
- V(0xFF, UNUSED_FF, "unused-ff", k10x, kIndexUnknown, 0, 0, kVerifyError)
+ V(0xFE, CONST_METHOD_HANDLE, "const-method-handle", k21c, kIndexMethodHandleRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBMethodHandle) \
+ V(0xFF, CONST_METHOD_TYPE, "const-method-type", k21c, kIndexProtoRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBPrototype)
#define DEX_INSTRUCTION_FORMAT_LIST(V) \
V(k10x) \
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 813a264..ea7a83c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2185,20 +2185,11 @@
// Generic JNI trampoline at this stage; instead, method's
// annotations' classes are looked up in the bootstrap class
// loader's resolved types (which won't trigger an exception).
+ CHECK(!self->IsExceptionPending());
bool critical_native = called->IsAnnotatedWithCriticalNative();
- // ArtMethod::IsAnnotatedWithCriticalNative should not throw
- // an exception; clear it if it happened anyway.
- // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()).
- if (self->IsExceptionPending()) {
- self->ClearException();
- }
+ CHECK(!self->IsExceptionPending());
bool fast_native = called->IsAnnotatedWithFastNative();
- // ArtMethod::IsAnnotatedWithFastNative should not throw
- // an exception; clear it if it happened anyway.
- // TODO: Revisit this code path and turn this into a CHECK(!self->IsExceptionPending()).
- if (self->IsExceptionPending()) {
- self->ClearException();
- }
+ CHECK(!self->IsExceptionPending());
bool normal_native = !critical_native && !fast_native;
// Restore the initial ArtMethod pointer at `*sp`.
*sp = called;
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 281dfd9..7c912d0 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -139,7 +139,8 @@
sizeof(void*) * kLockLevelCount);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
- EXPECT_OFFSET_DIFF(Thread, tlsPtr_.thread_local_mark_stack, Thread, wait_mutex_, sizeof(void*),
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
+ EXPECT_OFFSET_DIFF(Thread, tlsPtr_.async_exception, Thread, wait_mutex_, sizeof(void*),
thread_tlsptr_end);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6377c89..4004af2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3855,15 +3855,19 @@
}
void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count) {
+ // Compare rounded sizes since the allocation may have been retried after rounding the size.
+ // See b/37885600
CHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
- (c->IsVariableSize() || c->GetObjectSize() == byte_count))
+ (c->IsVariableSize() ||
+ RoundUp(c->GetObjectSize(), kObjectAlignment) ==
+ RoundUp(byte_count, kObjectAlignment)))
<< "ClassFlags=" << c->GetClassFlags()
<< " IsClassClass=" << c->IsClassClass()
<< " byte_count=" << byte_count
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
+ << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 7ec54f5..576a35c 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -60,7 +60,7 @@
// Protect memory beyond the starting size. morecore will add r/w permissions when necessory
uint8_t* end = mem_map->Begin() + starting_size;
if (capacity - starting_size > 0) {
- CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
+ CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set so record in immutable structure and leave
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 4597a96..45f4f82 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -453,7 +453,7 @@
madvise(obj, allocation_size, MADV_DONTNEED);
if (kIsDebugBuild) {
// Can't disallow reads since we use them to find next chunks during coalescing.
- mprotect(obj, allocation_size, PROT_READ);
+ CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
}
return allocation_size;
}
@@ -519,7 +519,7 @@
// We always put our object at the start of the free block, there cannot be another free block
// before it.
if (kIsDebugBuild) {
- mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
+ CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ | PROT_WRITE);
}
new_info->SetPrevFreeBytes(0);
new_info->SetByteSize(allocation_size, false);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index c994127..dcb7837 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -141,7 +141,7 @@
// Should never be asked to increase the allocation beyond the capacity of the space. Enforced
// by mspace_set_footprint_limit.
CHECK_LE(new_end, Begin() + Capacity());
- CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
+ CheckedCall(mprotect, GetName(), original_end, increment, PROT_READ | PROT_WRITE);
} else {
// Should never be asked for negative footprint (ie before begin). Zero footprint is ok.
CHECK_GE(original_end + increment, Begin());
@@ -152,8 +152,8 @@
// removing ignoring the memory protection change here and in Space::CreateAllocSpace. It's
// likely just a useful debug feature.
size_t size = -increment;
- CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
- CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
+ CheckedCall(madvise, GetName(), new_end, size, MADV_DONTNEED);
+ CheckedCall(mprotect, GetName(), new_end, size, PROT_NONE);
}
// Update end_.
SetEnd(new_end);
@@ -201,7 +201,7 @@
// Protect memory beyond the initial size.
uint8_t* end = mem_map->Begin() + starting_size_;
if (capacity > initial_size_) {
- CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size_, PROT_NONE), alloc_space_name);
+ CheckedCall(mprotect, alloc_space_name, end, capacity - initial_size_, PROT_NONE);
}
*out_malloc_space = CreateInstance(mem_map.release(), alloc_space_name, allocator, End(), end,
limit_, growth_limit, CanMoveObjects());
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index f85ea46..a41ef43 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -33,16 +33,6 @@
class ZygoteSpace;
-// TODO: Remove define macro
-#define CHECK_MEMORY_CALL(call, args, what) \
- do { \
- int rc = call args; \
- if (UNLIKELY(rc != 0)) { \
- errno = rc; \
- PLOG(FATAL) << # call << " failed for " << (what); \
- } \
- } while (false)
-
// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
public:
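The CHECK_MEMORY_CALL macro removed above is superseded by the CheckedCall(...) sites throughout this patch. Its replacement is presumably a variadic template wrapper roughly like the following sketch, not the actual ART definition:

    template <typename Func, typename... Args>
    void CheckedCall(Func func, const char* what, Args... args) {
      int rc = func(args...);
      if (UNLIKELY(rc != 0)) {
        // Same failure behavior the old macro had: log fatally with context.
        PLOG(FATAL) << "Checked call failed for " << what;
      }
    }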
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index b2e1fa5..a51df7c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -254,7 +254,7 @@
static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
ZeroAndReleasePages(begin, end - begin);
if (kProtectClearedRegions) {
- mprotect(begin, end - begin, PROT_NONE);
+ CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
}
}
@@ -589,7 +589,7 @@
region_space->AdjustNonFreeRegionLimit(idx_);
type_ = RegionType::kRegionTypeToSpace;
if (kProtectClearedRegions) {
- mprotect(Begin(), kRegionSize, PROT_READ | PROT_WRITE);
+ CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
}
}
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eca0e43..5d1f191 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -71,7 +71,7 @@
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessory
uint8_t* end = mem_map->Begin() + starting_size;
if (capacity - starting_size > 0) {
- CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
+ CheckedCall(mprotect, name.c_str(), end, capacity - starting_size, PROT_NONE);
}
// Everything is set so record in immutable structure and leave
diff --git a/runtime/image.cc b/runtime/image.cc
index 0236f47..aae572b 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '8', '\0' }; // Map boot image tables.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', 'A', '\0' }; // VarHandle fence intrinsics
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 2dd4db3..2c8ec47 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -511,7 +511,7 @@
return true;
}
-size_t IndirectReferenceTable::FreeCapacity() {
+size_t IndirectReferenceTable::FreeCapacity() const {
return max_entries_ - segment_state_.top_index;
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index bf287b1..6675099 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -278,7 +278,9 @@
void AssertEmpty() REQUIRES_SHARED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
// Return the #of entries in the entire table. This includes holes, and
// so may be larger than the actual number of "live" entries.
@@ -291,7 +293,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// See implementation of EnsureFreeCapacity. We'll only state here how much is trivially free,
// without recovering holes. Thus this is a conservative estimate.
- size_t FreeCapacity() REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t FreeCapacity() const;
// Note IrtIterator does not have a read barrier as it's used to visit roots.
IrtIterator begin() {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index c345013..5938113 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -594,6 +594,10 @@
uint16_t inst_data,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Make sure to check for async exceptions
+ if (UNLIKELY(self->ObserveAsyncException())) {
+ return false;
+ }
// Invoke-polymorphic instructions always take a receiver. i.e, they are never static.
const uint32_t vRegC = (is_range) ? inst->VRegC_4rcc() : inst->VRegC_45cc();
const int invoke_method_idx = (is_range) ? inst->VRegB_4rcc() : inst->VRegB_45cc();
@@ -899,6 +903,10 @@
uint16_t inst_data,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Make sure to check for async exceptions
+ if (UNLIKELY(self->ObserveAsyncException())) {
+ return false;
+ }
// invoke-custom is not supported in transactions. In transactions
// there is a limited set of types supported. invoke-custom allows
// running arbitrary code and instantiating arbitrary types.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 50bd7e7..1c79619 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -169,6 +169,10 @@
const Instruction* inst,
uint16_t inst_data,
JValue* result) {
+ // Make sure to check for async exceptions before anything else.
+ if (UNLIKELY(self->ObserveAsyncException())) {
+ return false;
+ }
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
ObjPtr<mirror::Object> receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
@@ -202,6 +206,25 @@
}
}
+static inline mirror::MethodHandle* ResolveMethodHandle(uint32_t method_handle_index,
+ ArtMethod* referrer)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ return class_linker->ResolveMethodHandle(method_handle_index, referrer);
+}
+
+static inline mirror::MethodType* ResolveMethodType(Thread* self,
+ uint32_t method_type_index,
+ ArtMethod* referrer)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const DexFile* dex_file = referrer->GetDexFile();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
+ return class_linker->ResolveMethodType(*dex_file, method_type_index, dex_cache, class_loader);
+}
+
// Performs a signature polymorphic invoke (invoke-polymorphic/invoke-polymorphic-range).
template<bool is_range>
bool DoInvokePolymorphic(Thread* self,
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 74e6cd2..26de6b4 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -323,6 +323,25 @@
return true;
}
+#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
+static ALWAYS_INLINE bool name(ShadowFrame* /* shadow_frame */, \
+ const Instruction* /* inst */, \
+ uint16_t /* inst_data */, \
+ JValue* /* result_register */) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ std::atomic_thread_fence(std_memory_operation); \
+ return true; \
+}
+
+// The VarHandle fence methods are static (unlike sun.misc.Unsafe versions).
+// The fences for the LoadLoadFence and StoreStoreFence are stronger
+// than strictly required, but the impact should be marginal.
+VARHANDLE_FENCE_INTRINSIC(MterpVarHandleFullFence, std::memory_order_seq_cst)
+VARHANDLE_FENCE_INTRINSIC(MterpVarHandleAcquireFence, std::memory_order_acquire)
+VARHANDLE_FENCE_INTRINSIC(MterpVarHandleReleaseFence, std::memory_order_release)
+VARHANDLE_FENCE_INTRINSIC(MterpVarHandleLoadLoadFence, std::memory_order_acquire)
+VARHANDLE_FENCE_INTRINSIC(MterpVarHandleStoreStoreFence, std::memory_order_release)
+
// Macro to help keep track of what's left to implement.
#define UNIMPLEMENTED_CASE(name) \
case Intrinsics::k##name: \
@@ -470,6 +489,11 @@
UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
+ INTRINSIC_CASE(VarHandleFullFence)
+ INTRINSIC_CASE(VarHandleAcquireFence)
+ INTRINSIC_CASE(VarHandleReleaseFence)
+ INTRINSIC_CASE(VarHandleLoadLoadFence)
+ INTRINSIC_CASE(VarHandleStoreStoreFence)
case Intrinsics::kNone:
res = false;
break;
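
For readers unfamiliar with the mapping used by VARHANDLE_FENCE_INTRINSIC above: each of the five intrinsics reduces to a single std::atomic_thread_fence call. A compilable standalone sketch (plain functions standing in for the mterp handlers):

#include <atomic>

// The five VarHandle fences, reduced to their C++ memory-order fences.
// As noted above, loadLoadFence/storeStoreFence reuse the stronger
// acquire/release fences rather than dedicated weaker orderings.
void FullFence()       { std::atomic_thread_fence(std::memory_order_seq_cst); }
void AcquireFence()    { std::atomic_thread_fence(std::memory_order_acquire); }
void ReleaseFence()    { std::atomic_thread_fence(std::memory_order_release); }
void LoadLoadFence()   { std::atomic_thread_fence(std::memory_order_acquire); }
void StoreStoreFence() { std::atomic_thread_fence(std::memory_order_release); }
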
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 850419b..74d7901 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -102,6 +102,13 @@
} \
} while (false)
+#define HANDLE_ASYNC_EXCEPTION() \
+ if (UNLIKELY(self->ObserveAsyncException())) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ break; \
+ } \
+ do {} while (false)
+
#define HANDLE_BACKWARD_BRANCH(offset) \
do { \
if (IsBackwardBranch(offset)) { \
@@ -525,8 +532,33 @@
}
break;
}
+ case Instruction::CONST_METHOD_HANDLE: {
+ PREAMBLE();
+ ObjPtr<mirror::MethodHandle> mh =
+ Runtime::Current()->GetClassLinker()->ResolveMethodHandle(inst->VRegB_21c(), method);
+ if (UNLIKELY(mh == nullptr)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mh.Ptr());
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
+ case Instruction::CONST_METHOD_TYPE: {
+ PREAMBLE();
+ ObjPtr<mirror::MethodType> mt =
+ Runtime::Current()->GetClassLinker()->ResolveMethodType(inst->VRegB_21c(), method);
+ if (UNLIKELY(mt == nullptr)) {
+ HANDLE_PENDING_EXCEPTION();
+ } else {
+ shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mt.Ptr());
+ inst = inst->Next_2xx();
+ }
+ break;
+ }
case Instruction::MONITOR_ENTER: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
@@ -539,6 +571,7 @@
}
case Instruction::MONITOR_EXIT: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
@@ -686,6 +719,7 @@
}
case Instruction::THROW: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
ObjPtr<mirror::Object> exception =
shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == nullptr)) {
@@ -704,6 +738,7 @@
}
case Instruction::GOTO: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
int8_t offset = inst->VRegA_10t(inst_data);
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
@@ -712,6 +747,7 @@
}
case Instruction::GOTO_16: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
int16_t offset = inst->VRegA_20t();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
@@ -720,6 +756,7 @@
}
case Instruction::GOTO_32: {
PREAMBLE();
+ HANDLE_ASYNC_EXCEPTION();
int32_t offset = inst->VRegA_30t();
BRANCH_INSTRUMENTATION(offset);
inst = inst->RelativeAt(offset);
@@ -2435,10 +2472,8 @@
inst = inst->Next_2xx();
break;
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
+ case Instruction::UNUSED_79 ... Instruction::UNUSED_7A:
case Instruction::UNUSED_F3 ... Instruction::UNUSED_F9:
- case Instruction::UNUSED_FE ... Instruction::UNUSED_FF:
- case Instruction::UNUSED_79:
- case Instruction::UNUSED_7A:
UnexpectedOpcode(inst, shadow_frame);
}
} while (!interpret_one_instruction);
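
The pattern behind HANDLE_ASYNC_EXCEPTION above: an exception thrown asynchronously from another thread can only be delivered at points the interpreter is guaranteed to revisit (gotos, throw, monitor operations), otherwise a tight loop might never notice it. A standalone sketch of that polling shape, with a hypothetical atomic flag standing in for Thread::ObserveAsyncException():

#include <atomic>

// Hypothetical stand-in for the thread's async-exception slot.
std::atomic<bool> async_exception_pending{false};

// Mirrors the role of ObserveAsyncException(): consume the async exception,
// making it the thread's pending exception (reduced here to a flag).
bool ObserveAsyncException() {
  return async_exception_pending.exchange(false);
}

// A loop containing only backward branches: without the poll below, an
// exception raised by another thread would never be observed.
int Interpret(int iterations) {
  for (int i = 0; i < iterations; ++i) {  // stands in for GOTO back-edges
    if (ObserveAsyncException()) {
      return -1;  // unwind to the handler, as HANDLE_PENDING_EXCEPTION does
    }
    // ... execute one bytecode ...
  }
  return 0;
}
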
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 6c24753..2318125 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -145,9 +145,14 @@
extern "C" size_t MterpShouldSwitchInterpreters()
REQUIRES_SHARED(Locks::mutator_lock_) {
- const instrumentation::Instrumentation* const instrumentation =
- Runtime::Current()->GetInstrumentation();
- return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
+ const Runtime* const runtime = Runtime::Current();
+ const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
+ return instrumentation->NonJitProfilingActive() ||
+ Dbg::IsDebuggerActive() ||
+ // An async exception has been thrown. We need to go to the switch interpreter: mterp
+ // doesn't know how to handle async exceptions, so if we stayed in it an infinite loop
+ // could keep us from ever dealing with the exception.
+ UNLIKELY(Thread::Current()->IsAsyncExceptionPending());
}
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 6903af2..80fdadb 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -188,9 +188,7 @@
const uint32_t* vreg_ptr = &vregs_[i];
ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
}
- if (kUseReadBarrier) {
- ReadBarrier::AssertToSpaceInvariant(ref);
- }
+ ReadBarrier::MaybeAssertToSpaceInvariant(ref);
if (kVerifyFlags & kVerifyReads) {
VerifyObject(ref);
}
@@ -256,9 +254,7 @@
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(val);
}
- if (kUseReadBarrier) {
- ReadBarrier::AssertToSpaceInvariant(val);
- }
+ ReadBarrier::MaybeAssertToSpaceInvariant(val);
uint32_t* vreg = &vregs_[i];
reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
if (HasReferenceArray()) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 5a16053..a72fa54 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -28,6 +28,8 @@
#include "check_jni.h"
#include "dex_file-inl.h"
#include "fault_handler.h"
+#include "gc/allocation_record.h"
+#include "gc/heap.h"
#include "gc_root-inl.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
@@ -468,7 +470,11 @@
weak_globals_add_condition_("weak globals add condition",
(CHECK(Locks::jni_weak_globals_lock_ != nullptr),
*Locks::jni_weak_globals_lock_)),
- env_hooks_() {
+ env_hooks_(),
+ enable_allocation_tracking_delta_(
+ runtime_options.GetOrDefault(RuntimeArgumentMap::GlobalRefAllocStackTraceLimit)),
+ allocation_tracking_enabled_(false),
+ old_allocation_tracking_state_(false) {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
}
@@ -583,18 +589,55 @@
return true;
}
+void JavaVMExt::CheckGlobalRefAllocationTracking() {
+ if (LIKELY(enable_allocation_tracking_delta_ == 0)) {
+ return;
+ }
+ size_t simple_free_capacity = globals_.FreeCapacity();
+ if (UNLIKELY(simple_free_capacity <= enable_allocation_tracking_delta_)) {
+ if (!allocation_tracking_enabled_) {
+ LOG(WARNING) << "Global reference storage appears close to exhaustion, program termination "
+ << "may be imminent. Enabling allocation tracking to improve abort diagnostics. "
+ << "This will result in program slow-down.";
+
+ old_allocation_tracking_state_ = runtime_->GetHeap()->IsAllocTrackingEnabled();
+ if (!old_allocation_tracking_state_) {
+ // Need to be guaranteed suspended.
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(true);
+ }
+ allocation_tracking_enabled_ = true;
+ }
+ } else {
+ if (UNLIKELY(allocation_tracking_enabled_)) {
+ if (!old_allocation_tracking_state_) {
+ // Need to be guaranteed suspended.
+ ScopedObjectAccess soa(Thread::Current());
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(false);
+ }
+ allocation_tracking_enabled_ = false;
+ }
+ }
+}
+
jobject JavaVMExt::AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
// Check for null after decoding the object to handle cleared weak globals.
if (obj == nullptr) {
return nullptr;
}
- WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ IndirectRef ref;
std::string error_msg;
- IndirectRef ref = globals_.Add(kIRTFirstSegment, obj, &error_msg);
+ {
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ ref = globals_.Add(kIRTFirstSegment, obj, &error_msg);
+ }
if (UNLIKELY(ref == nullptr)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
}
+ CheckGlobalRefAllocationTracking();
return reinterpret_cast<jobject>(ref);
}
@@ -625,11 +668,14 @@
if (obj == nullptr) {
return;
}
- WriterMutexLock mu(self, *Locks::jni_globals_lock_);
- if (!globals_.Remove(kIRTFirstSegment, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
- << "failed to find entry";
+ {
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
+ if (!globals_.Remove(kIRTFirstSegment, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
}
+ CheckGlobalRefAllocationTracking();
}
void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
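
The control flow of CheckGlobalRefAllocationTracking() is a simple hysteresis: when the global-reference table's free capacity falls to the configured delta, allocation tracking is switched on (remembering whether it was already on), and once capacity recovers, the previous state is restored. A standalone distillation of just that policy, with hypothetical names:

#include <cstddef>

// Hypothetical sketch of the enable/disable policy above; not ART code.
struct RefTrackingPolicy {
  size_t delta = 0;             // 0 means the feature is off
  bool we_enabled = false;      // did this policy turn tracking on?
  bool previous_state = false;  // tracking state before we intervened

  void Check(size_t free_capacity, bool tracking_now, void (*set_tracking)(bool)) {
    if (delta == 0) return;
    if (free_capacity <= delta) {
      if (!we_enabled) {
        previous_state = tracking_now;
        if (!previous_state) set_tracking(true);
        we_enabled = true;
      }
    } else if (we_enabled) {
      if (!previous_state) set_tracking(false);
      we_enabled = false;  // ready to trigger again if capacity drops anew
    }
  }
};
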
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 50aabdc..0510d6a 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -123,7 +123,9 @@
void DumpReferenceTables(std::ostream& os)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::jni_globals_lock_, !Locks::jni_weak_globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_,
+ !Locks::jni_weak_globals_lock_,
+ !Locks::alloc_tracker_lock_);
bool SetCheckJniEnabled(bool enabled);
@@ -209,6 +211,8 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::jni_weak_globals_lock_);
+ void CheckGlobalRefAllocationTracking();
+
Runtime* const runtime_;
// Used for testing. By default, we'll LOG(FATAL) the reason.
@@ -245,6 +249,10 @@
// TODO Maybe move this to Runtime.
std::vector<GetEnvHook> env_hooks_;
+ size_t enable_allocation_tracking_delta_;
+ std::atomic<bool> allocation_tracking_enabled_;
+ std::atomic<bool> old_allocation_tracking_state_;
+
DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
};
diff --git a/runtime/java_vm_ext_test.cc b/runtime/java_vm_ext_test.cc
index 2cbfa81..5db493b 100644
--- a/runtime/java_vm_ext_test.cc
+++ b/runtime/java_vm_ext_test.cc
@@ -19,6 +19,7 @@
#include <pthread.h>
#include "common_runtime_test.h"
+#include "gc/heap.h"
#include "java_vm_ext.h"
#include "runtime.h"
@@ -134,4 +135,36 @@
EXPECT_EQ(JNI_ERR, err);
}
+class JavaVmExtStackTraceTest : public JavaVmExtTest {
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ options->emplace_back("-XX:GlobalRefAllocStackTraceLimit=50000", nullptr);
+ }
+};
+
+TEST_F(JavaVmExtStackTraceTest, TestEnableDisable) {
+ ASSERT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ JNIEnv* env;
+ jint ok = vm_->AttachCurrentThread(&env, nullptr);
+ ASSERT_EQ(JNI_OK, ok);
+
+ std::vector<jobject> global_refs;
+ jobject local_ref = env->NewStringUTF("Dummy");
+ for (size_t i = 0; i < 2000; ++i) {
+ global_refs.push_back(env->NewGlobalRef(local_ref));
+ }
+
+ EXPECT_TRUE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ for (jobject global_ref : global_refs) {
+ env->DeleteGlobalRef(global_ref);
+ }
+
+ EXPECT_FALSE(Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
+
+ ok = vm_->DetachCurrentThread();
+ EXPECT_EQ(JNI_OK, ok);
+}
+
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 40a5212..e122c6d 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -51,15 +51,6 @@
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
-#define CHECKED_MPROTECT(memory, size, prot) \
- do { \
- int rc = mprotect(memory, size, prot); \
- if (UNLIKELY(rc != 0)) { \
- errno = rc; \
- PLOG(FATAL) << "Failed to mprotect jit code cache"; \
- } \
- } while (false) \
-
JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
size_t max_capacity,
bool generate_debug_info,
@@ -173,8 +164,16 @@
SetFootprintLimit(current_capacity_);
- CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
- CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+ CheckedCall(mprotect,
+ "mprotect jit code cache",
+ code_map_->Begin(),
+ code_map_->Size(),
+ kProtCode);
+ CheckedCall(mprotect,
+ "mprotect jit data cache",
+ data_map_->Begin(),
+ data_map_->Size(),
+ kProtData);
VLOG(jit) << "Created jit code cache: initial data size="
<< PrettySize(initial_data_capacity)
@@ -203,14 +202,21 @@
code_map_(code_map),
only_for_tlb_shootdown_(only_for_tlb_shootdown) {
ScopedTrace trace("mprotect all");
- CHECKED_MPROTECT(
- code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
+ CheckedCall(mprotect,
+ "make code writable",
+ code_map_->Begin(),
+ only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
+ kProtAll);
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CHECKED_MPROTECT(
- code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
+ CheckedCall(mprotect,
+ "make code protected",
+ code_map_->Begin(),
+ only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
+ kProtCode);
}
+
private:
MemMap* const code_map_;
@@ -534,7 +540,7 @@
static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
if (was_warm) {
- method->AddAccessFlags(kAccPreviouslyWarm);
+ method->SetPreviouslyWarm();
}
// We reset the counter to 1 so that the profile knows that the method was executed at least once.
// This is required for layout purposes.
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1ed7889..f9603a7 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -76,20 +76,20 @@
ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
: default_arena_pool_(),
- arena_(custom_arena_pool),
- info_(arena_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
+ allocator_(custom_arena_pool),
+ info_(allocator_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::ProfileCompilationInfo()
: default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"),
- arena_(&default_arena_pool_),
- info_(arena_.Adapter(kArenaAllocProfile)),
- profile_key_map_(std::less<const std::string>(), arena_.Adapter(kArenaAllocProfile)) {
+ allocator_(&default_arena_pool_),
+ info_(allocator_.Adapter(kArenaAllocProfile)),
+ profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
}
ProfileCompilationInfo::~ProfileCompilationInfo() {
- VLOG(profiler) << Dumpable<MemStats>(arena_.GetMemStats());
+ VLOG(profiler) << Dumpable<MemStats>(allocator_.GetMemStats());
for (DexFileData* data : info_) {
delete data;
}
@@ -569,8 +569,8 @@
uint8_t profile_index = profile_index_it->second;
if (info_.size() <= profile_index) {
// This is a new addition. Add it to the info_ array.
- DexFileData* dex_file_data = new (&arena_) DexFileData(
- &arena_,
+ DexFileData* dex_file_data = new (&allocator_) DexFileData(
+ &allocator_,
profile_key,
checksum,
profile_index,
@@ -1704,7 +1704,10 @@
if (m < (number_of_methods / kFavorSplit)) {
method_idx %= kFavorFirstN;
}
- info.AddMethodIndex(MethodHotness::kFlagHot,
+ // Alternate between startup and post startup.
+ uint32_t flags = MethodHotness::kFlagHot;
+ flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
+ info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
profile_key,
/*method_idx*/ 0,
method_idx,
@@ -1761,8 +1764,13 @@
if (number_of_methods - i == methods_required_in_profile ||
std::rand() % (number_of_methods - i - methods_required_in_profile) == 0) {
uint32_t method_index = (method_start_index + i) % number_of_methods;
- info.AddMethodIndex(MethodHotness::kFlagHot, MethodReference(dex_file.get(),
- method_index));
+ // Alternate between startup and post startup.
+ uint32_t flags = MethodHotness::kFlagHot;
+ flags |= ((method_index & 1) != 0)
+ ? MethodHotness::kFlagPostStartup
+ : MethodHotness::kFlagStartup;
+ info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
+ MethodReference(dex_file.get(), method_index));
methods_required_in_profile--;
}
}
@@ -1863,7 +1871,7 @@
ProfileCompilationInfo::DexPcData*
ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t dex_pc) {
- return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&arena_))->second);
+ return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
}
std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 09de29e..8889b34 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -133,10 +133,10 @@
// megamorphic and its possible types).
// If the receiver is megamorphic or is missing types the set of classes will be empty.
struct DexPcData : public ArenaObject<kArenaAllocProfile> {
- explicit DexPcData(ArenaAllocator* arena)
+ explicit DexPcData(ArenaAllocator* allocator)
: is_missing_types(false),
is_megamorphic(false),
- classes(std::less<ClassReference>(), arena->Adapter(kArenaAllocProfile)) {}
+ classes(std::less<ClassReference>(), allocator->Adapter(kArenaAllocProfile)) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
void SetIsMegamorphic() {
if (is_missing_types) return;
@@ -405,7 +405,7 @@
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi2);
- ArenaAllocator* GetArena() { return &arena_; }
+ ArenaAllocator* GetAllocator() { return &allocator_; }
// Return all of the class descriptors in the profile for a set of dex files.
std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
@@ -429,19 +429,19 @@
// profile_key_map_ and info_. However, it makes the profiles logic much
// simpler if we have references here as well.
struct DexFileData : public DeletableArenaObject<kArenaAllocProfile> {
- DexFileData(ArenaAllocator* arena,
+ DexFileData(ArenaAllocator* allocator,
const std::string& key,
uint32_t location_checksum,
uint16_t index,
uint32_t num_methods)
- : arena_(arena),
+ : arena_(allocator),
profile_key(key),
profile_index(index),
checksum(location_checksum),
- method_map(std::less<uint16_t>(), arena->Adapter(kArenaAllocProfile)),
- class_set(std::less<dex::TypeIndex>(), arena->Adapter(kArenaAllocProfile)),
+ method_map(std::less<uint16_t>(), allocator->Adapter(kArenaAllocProfile)),
+ class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
- bitmap_storage(arena->Adapter(kArenaAllocProfile)) {
+ bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
const size_t num_bits = num_method_ids * kBitmapIndexCount;
bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
if (!bitmap_storage.empty()) {
@@ -698,7 +698,7 @@
friend class Dex2oatLayoutTest;
ArenaPool default_arena_pool_;
- ArenaAllocator arena_;
+ ArenaAllocator allocator_;
// Vector containing the actual profile info.
// The vector index is the profile index of the dex data and
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index af6a45f..2bf8d8b 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -349,7 +349,7 @@
// Mark startup methods as hot if they have more than hot_method_sample_threshold
// samples. This means they will get compiled by the compiler driver.
if (method.GetProfilingInfo(kRuntimePointerSize) != nullptr ||
- (method.GetAccessFlags() & kAccPreviouslyWarm) != 0 ||
+ method.PreviouslyWarm() ||
counter >= hot_method_sample_threshold) {
hot_methods->AddReference(method.GetDexFile(), method.GetDexMethodIndex());
} else if (counter != 0) {
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index af933ae..2f6c5dc 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -45,7 +45,8 @@
~JNIEnvExt();
void DumpReferenceTables(std::ostream& os)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
void SetCheckJniEnabled(bool enabled) REQUIRES(!Locks::jni_function_table_lock_);
@@ -53,7 +54,9 @@
void PopFrame() REQUIRES_SHARED(Locks::mutator_lock_);
template<typename T>
- T AddLocalReference(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ T AddLocalReference(ObjPtr<mirror::Object> obj)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
static Offset SegmentStateOffset(size_t pointer_size);
static Offset LocalRefCookieOffset(size_t pointer_size);
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 68ab4a4..4b790a0 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -79,6 +79,11 @@
// virtual call.
static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
+// Not currently used, except that for intrinsic methods these bits
+// form part of the intrinsic ordinal.
+static constexpr uint32_t kAccMayBeUnusedBits = 0x70000000;
+
+// Set by the compiler driver when compiling boot classes with intrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
// Special runtime-only flags.
@@ -89,8 +94,10 @@
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
-static constexpr uint32_t kAccFlagsNotUsedByIntrinsic = 0x00FFFFFF;
-static constexpr uint32_t kAccMaxIntrinsic = 0x7F;
+// Contiguous sequence of bits used to hold the ordinal of an intrinsic method. Flags
+// which overlap are not valid when kAccIntrinsic is set.
+static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccSingleImplementation |
+ kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict | kAccPreviouslyWarm;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
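
To make the kAccIntrinsicBits change concrete: when kAccIntrinsic is set, the overlapping flag bits are reinterpreted as an ordinal. The mask and shift below are invented for illustration (the real layout is defined by ArtMethod's accessors); only the pack/unpack shape matters:

#include <cstdint>

// Illustration only: hypothetical mask/shift, not ART's actual encoding.
static constexpr uint32_t kIntrinsicFlag = 0x80000000;  // mirrors kAccIntrinsic
static constexpr uint32_t kOrdinalMask   = 0x7f000000;  // made up for this sketch
static constexpr int      kOrdinalShift  = 24;          // made up for this sketch

constexpr uint32_t PackIntrinsic(uint32_t flags, uint32_t ordinal) {
  // The ordinal overwrites flag bits that are invalid for intrinsics anyway.
  return (flags & ~kOrdinalMask) | kIntrinsicFlag | (ordinal << kOrdinalShift);
}

constexpr uint32_t IntrinsicOrdinal(uint32_t flags) {
  return (flags & kOrdinalMask) >> kOrdinalShift;
}

static_assert(IntrinsicOrdinal(PackIntrinsic(0u, 42u)) == 42u, "round-trips");
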
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index d85479a..7823413 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1143,7 +1143,7 @@
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
Runtime::Current()->GetRuntimeCallbacks()->ObjectWaitStart(h_obj, ms);
- if (UNLIKELY(self->IsExceptionPending())) {
+ if (UNLIKELY(self->ObserveAsyncException() || self->IsExceptionPending())) {
// See b/65558434 for information on handling of exceptions here.
return;
}
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 3357fa7..70dd5cb 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -95,10 +95,10 @@
}
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
- jobject javaFd, jint bufferSize, jint flags,
+ jint javaFd, jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs,
jboolean streamingOutput) {
- int originalFd = jniGetFDFromFileDescriptor(env, javaFd);
+ int originalFd = javaFd;
if (originalFd < 0) {
return;
}
@@ -224,9 +224,9 @@
* Cause "hprof" data to be dumped. We can throw an IOException if an
* error occurs during file handling.
*/
-static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
+static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jint javaFd) {
// At most one of these may be unset (null filename / negative fd).
- if (javaFilename == nullptr && javaFd == nullptr) {
+ if (javaFilename == nullptr && javaFd < 0) {
ScopedObjectAccess soa(env);
ThrowNullPointerException("fileName == null && fd == null");
return;
@@ -243,15 +243,7 @@
filename = "[fd]";
}
- int fd = -1;
- if (javaFd != nullptr) {
- fd = jniGetFDFromFileDescriptor(env, javaFd);
- if (fd < 0) {
- ScopedObjectAccess soa(env);
- ThrowRuntimeException("Invalid file descriptor");
- return;
- }
- }
+ int fd = javaFd;
hprof::DumpHeap(filename.c_str(), fd, false);
}
@@ -537,7 +529,7 @@
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
- NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"),
+ NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;I)V"),
NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"),
NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"),
NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
@@ -557,7 +549,7 @@
NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;IIIZIZ)V"),
NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"),
NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"),
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
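
The VMDebug change above swaps java.io.FileDescriptor parameters for plain ints, so the native side receives a jint directly and the jniGetFDFromFileDescriptor() lookup disappears. A standalone sketch of what such a registration looks like; the class and method names here are hypothetical, not the VMDebug ones:

#include <jni.h>

// Hypothetical native method taking a raw fd; '< 0' means "not provided",
// matching the convention used by dumpHprofData above.
static void Demo_dumpData(JNIEnv*, jclass, jstring /* filename */, jint fd) {
  if (fd < 0) {
    return;
  }
  // ... write to fd ...
}

static const JNINativeMethod gMethods[] = {
  // Signature "(Ljava/lang/String;I)V": String plus int, instead of
  // String plus Ljava/io/FileDescriptor;.
  { "dumpData", "(Ljava/lang/String;I)V", reinterpret_cast<void*>(Demo_dumpData) },
};

jint RegisterDemoNatives(JNIEnv* env, jclass klass) {
  return env->RegisterNatives(klass, gMethods,
                              sizeof(gMethods) / sizeof(gMethods[0]));
}
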
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9a42c29..3f4cb94 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -638,7 +638,7 @@
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime == nullptr)) {
// This must be oatdump without boot image. Make sure the .bss is inaccessible.
- mprotect(const_cast<uint8_t*>(BssBegin()), BssSize(), PROT_NONE);
+ CheckedCall(mprotect, "protect bss", const_cast<uint8_t*>(BssBegin()), BssSize(), PROT_NONE);
} else {
// Map boot image tables into the .bss. The reserved size must match size of the tables.
size_t reserved_size = static_cast<size_t>(boot_image_tables_end - boot_image_tables);
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 7cabae5..66b24a9 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -435,8 +435,13 @@
// Update the oat file on disk if we can, based on the --compiler-filter
// option derived from the current runtime options.
// This may fail, but that's okay. Best effort is all that matters here.
- switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/false,
- context.get(),
+ // TODO(calin): b/64530081 b/66984396. Pass a null context to verify and compile
+ // secondary dex files in isolation (and avoid extracting/verifying the main apk
+ // if it's in the class path). Note this trades correctness for performance:
+ // until b/64530081 is fixed, the slowdown of the correct behavior is
+ // unacceptable in some cases.
+ switch (oat_file_assistant.MakeUpToDate(/*profile_changed*/ false,
+ /*class_loader_context*/ nullptr,
/*out*/ &error_msg)) {
case OatFileAssistant::kUpdateFailed:
LOG(WARNING) << error_msg;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 1d524fd..9888186 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -310,6 +310,9 @@
.Define("-XX:ThreadSuspendTimeout=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::ThreadSuspendTimeout)
+ .Define("-XX:GlobalRefAllocStackTraceLimit=_") // Number of free slots to enable tracing.
+ .WithType<unsigned int>()
+ .IntoKey(M::GlobalRefAllocStackTraceLimit)
.Define("-XX:SlowDebug=_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 45e78bc..00674b2 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -89,6 +89,14 @@
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // As above, but without the holder object; asserts only in read-barrier configurations
+ // (no-op otherwise).
+ static void MaybeAssertToSpaceInvariant(mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kUseReadBarrier) {
+ AssertToSpaceInvariant(ref);
+ }
+ }
+
// ALWAYS_INLINE on this caused a performance regression b/26744236.
static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
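
MaybeAssertToSpaceInvariant() is the usual trick of folding a compile-time feature test into the callee so every call site loses its if-guard. The shape, in a self-contained sketch with a stand-in flag and assertion:

#include <cassert>

constexpr bool kFeatureEnabled = true;  // stand-in for kUseReadBarrier

inline void AssertInvariant(const void* ref) {
  assert(ref != nullptr);  // stand-in for the real to-space check
}

// Callers write one line; when kFeatureEnabled is false the branch is
// constant-folded away and the whole call disappears.
inline void MaybeAssertInvariant(const void* ref) {
  if (kFeatureEnabled) {
    AssertInvariant(ref);
  }
}
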
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index e6e588e..a6df27b 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -19,6 +19,8 @@
#include "android-base/stringprintf.h"
#include "base/mutex.h"
+#include "gc/allocation_record.h"
+#include "gc/heap.h"
#include "indirect_reference_table.h"
#include "mirror/array-inl.h"
#include "mirror/array.h"
@@ -206,6 +208,54 @@
}
}
os << StringPrintf(" %5d: ", idx) << ref << " " << className << extras << "\n";
+ if (runtime->GetHeap()->IsAllocTrackingEnabled()) {
+ MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+
+ gc::AllocRecordObjectMap* records = runtime->GetHeap()->GetAllocationRecords();
+ DCHECK(records != nullptr);
+ // It's annoying that this is a list, but this code should be executed only rarely.
+
+ auto print_stack = [&](ObjPtr<mirror::Object> to_print, const std::string& msg)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::alloc_tracker_lock_) {
+ for (auto it = records->Begin(), end = records->End(); it != end; ++it) {
+ GcRoot<mirror::Object>& stack_for_object = it->first;
+ gc::AllocRecord& record = it->second;
+ if (stack_for_object.Read() == to_print.Ptr()) {
+ os << " " << msg << "\n";
+ const gc::AllocRecordStackTrace* trace = record.GetStackTrace();
+ size_t depth = trace->GetDepth();
+ if (depth == 0) {
+ os << " (No managed frames)\n";
+ } else {
+ for (size_t i = 0; i < depth; ++i) {
+ const gc::AllocRecordStackTraceElement& frame = trace->GetStackElement(i);
+ os << " ";
+ if (frame.GetMethod() == nullptr) {
+ os << "(missing method data)\n";
+ continue;
+ }
+ os << frame.GetMethod()->PrettyMethod(true)
+ << ":"
+ << frame.ComputeLineNumber()
+ << "\n";
+ }
+ }
+ break;
+ }
+ }
+ };
+ // Print the stack trace of the ref.
+ print_stack(ref, "Allocated at:");
+
+ // If it's a reference, see if we have data about the referent.
+ if (ref->IsReferenceInstance()) {
+ ObjPtr<mirror::Object> referent = ref->AsReference()->GetReferent();
+ if (referent != nullptr) {
+ print_stack(referent, "Referent allocated at:");
+ }
+ }
+ }
}
// Make a copy of the table and sort it, only adding non null and not cleared elements.
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 010c6f8..6af5ca5 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -47,7 +47,9 @@
size_t Size() const;
- void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -56,7 +58,8 @@
typedef std::vector<GcRoot<mirror::Object>,
TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
static void Dump(std::ostream& os, Table& entries)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::alloc_tracker_lock_);
friend class IndirectReferenceTable; // For Dump.
std::string name_;
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index d830387..1e7fc3e 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -16,6 +16,8 @@
#include "reference_table.h"
+#include <regex>
+
#include "android-base/stringprintf.h"
#include "art_method-inl.h"
@@ -30,6 +32,7 @@
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
+#include "well_known_classes.h"
namespace art {
@@ -156,6 +159,7 @@
rt.Dump(oss);
EXPECT_NE(oss.str().find("java.lang.ref.WeakReference (referent is null)"), std::string::npos)
<< oss.str();
+ rt.Remove(empty_reference);
}
{
@@ -168,6 +172,86 @@
EXPECT_NE(oss.str().find("java.lang.ref.WeakReference (referent is a java.lang.String)"),
std::string::npos)
<< oss.str();
+ rt.Remove(non_empty_reference);
+ }
+
+ // Add two objects. Enable allocation tracking for the latter.
+ {
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::String> h_without_trace(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "Without")));
+
+ {
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kSuspended);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(true);
+ }
+
+ // To get a stack trace, actually make a call; String.substring is a simple choice. Calling
+ // through JNI avoids having to create the low-level args array ourselves.
+ Handle<mirror::Object> h_with_trace;
+ {
+ jmethodID substr = soa.Env()->GetMethodID(WellKnownClasses::java_lang_String,
+ "substring",
+ "(II)Ljava/lang/String;");
+ ASSERT_TRUE(substr != nullptr);
+ jobject jobj = soa.Env()->AddLocalReference<jobject>(h_without_trace.Get());
+ ASSERT_TRUE(jobj != nullptr);
+ jobject result = soa.Env()->CallObjectMethod(jobj,
+ substr,
+ static_cast<jint>(0),
+ static_cast<jint>(4));
+ ASSERT_TRUE(result != nullptr);
+ h_with_trace = hs.NewHandle(soa.Self()->DecodeJObject(result));
+ }
+
+ Handle<mirror::Object> h_ref;
+ {
+ jclass weak_ref_class = soa.Env()->FindClass("java/lang/ref/WeakReference");
+ ASSERT_TRUE(weak_ref_class != nullptr);
+ jmethodID init = soa.Env()->GetMethodID(weak_ref_class,
+ "<init>",
+ "(Ljava/lang/Object;)V");
+ ASSERT_TRUE(init != nullptr);
+ jobject referent = soa.Env()->AddLocalReference<jobject>(h_with_trace.Get());
+ jobject result = soa.Env()->NewObject(weak_ref_class, init, referent);
+ ASSERT_TRUE(result != nullptr);
+ h_ref = hs.NewHandle(soa.Self()->DecodeJObject(result));
+ }
+
+ rt.Add(h_without_trace.Get());
+ rt.Add(h_with_trace.Get());
+ rt.Add(h_ref.Get());
+
+ std::ostringstream oss;
+ rt.Dump(oss);
+
+ constexpr const char* kStackTracePattern =
+ R"(test reference table dump:\n)"
+ R"( Last 3 entries \(of 3\):\n)" // NOLINT
+ R"( 2: 0x[0-9a-f]* java.lang.ref.WeakReference \(referent is a java.lang.String\)\n)" // NOLINT
+ R"( Allocated at:\n)"
+ R"( \(No managed frames\)\n)" // NOLINT
+ R"( Referent allocated at:\n)"
+ R"( java.lang.String java.lang.String.fastSubstring\(int, int\):-2\n)" // NOLINT
+ R"( java.lang.String java.lang.String.substring\(int, int\):[0-9]*\n)" // NOLINT
+ R"( 1: 0x[0-9a-f]* java.lang.String "With"\n)"
+ R"( Allocated at:\n)"
+ R"( java.lang.String java.lang.String.fastSubstring\(int, int\):-2\n)" // NOLINT
+ R"( java.lang.String java.lang.String.substring\(int, int\):[0-9]*\n)" // NOLINT
+ R"( 0: 0x[0-9a-f]* java.lang.String "Without"\n)"
+ R"( Summary:\n)"
+ R"( 2 of java.lang.String \(2 unique instances\)\n)" // NOLINT
+ R"( 1 of java.lang.ref.WeakReference\n)";
+ std::regex stack_trace_regex(kStackTracePattern);
+ std::smatch stack_trace_match;
+ std::string str = oss.str();
+ bool found = std::regex_search(str, stack_trace_match, stack_trace_regex);
+ EXPECT_TRUE(found) << str;
+
+ {
+ ScopedThreadSuspension sts(soa.Self(), ThreadState::kSuspended);
+ gc::AllocRecordObjectMap::SetAllocTrackingEnabled(false);
+ }
}
}
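
The expected-output check above leans on two C++ features worth calling out: raw string literals, R"(...)", which keep the regex's backslashes readable without double-escaping, and std::regex_search over the whole dump. A minimal standalone sketch of the same shape, with invented sample text:

#include <iostream>
#include <regex>
#include <string>

int main() {
  // Inside R"(...)", \( \) and \n need no extra escaping; the regex engine
  // itself interprets \n as a newline.
  const char* pattern =
      R"(Allocated at:\n)"
      R"(  java\.lang\.String .*substring.*\n)";
  std::string dump =
      "Allocated at:\n"
      "  java.lang.String java.lang.String.substring(int, int):42\n";
  std::smatch match;
  bool found = std::regex_search(dump, match, std::regex(pattern));
  std::cout << (found ? "matched" : "no match") << "\n";
  return found ? 0 : 1;
}
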
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 78a60fa..cafae22 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -145,4 +145,6 @@
RUNTIME_OPTIONS_KEY (bool, SlowDebug, false)
+RUNTIME_OPTIONS_KEY (unsigned int, GlobalRefAllocStackTraceLimit, 0) // 0 = off
+
#undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index f0b6ee4..b50879f 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -27,7 +27,12 @@
#include <sstream>
-#include "android-base/stringprintf.h"
+#include <android-base/stringprintf.h>
+
+#if defined(ART_TARGET_ANDROID)
+#include <tombstoned/tombstoned.h>
+#endif
+
#include "arch/instruction_set.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
@@ -42,10 +47,6 @@
#include "thread_list.h"
#include "utils.h"
-#if defined(ART_TARGET_ANDROID)
-#include "tombstoned/tombstoned.h"
-#endif
-
namespace art {
static void DumpCmdLine(std::ostream& os) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 2412931..2753bf7 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2159,11 +2159,11 @@
ScopedObjectAccess soa(self);
// We may need to call user-supplied managed code, do this before final clean-up.
HandleUncaughtExceptions(soa);
+ RemoveFromThreadGroup(soa);
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
runtime->GetRuntimeCallbacks()->ThreadDeath(self);
}
- RemoveFromThreadGroup(soa);
// this.nativePeer = 0;
if (Runtime::Current()->IsActiveTransaction()) {
@@ -3080,9 +3080,7 @@
UNREACHABLE();
}
- if (kUseReadBarrier) {
- ReadBarrier::AssertToSpaceInvariant(exception.Ptr());
- }
+ ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
// This is a real exception: let the instrumentation know about it.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -3131,11 +3129,8 @@
} else {
// Exception was put back with a throw location.
DCHECK(IsExceptionPending());
- if (kUseReadBarrier) {
- // Check the to-space invariant on the re-installed exception.
- ObjPtr<mirror::Throwable> reinstalled_exception = GetException();
- ReadBarrier::AssertToSpaceInvariant(reinstalled_exception.Ptr());
- }
+ // Check the to-space invariant on the re-installed exception (if applicable).
+ ReadBarrier::MaybeAssertToSpaceInvariant(GetException());
}
exception_handler.DoLongJump();
}
@@ -3480,6 +3475,10 @@
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
RootInfo(kRootNativeStack, thread_id));
}
+ if (tlsPtr_.async_exception != nullptr) {
+ visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.async_exception),
+ RootInfo(kRootNativeStack, thread_id));
+ }
visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
@@ -3699,6 +3698,34 @@
method_type);
}
+void Thread::SetAsyncException(ObjPtr<mirror::Throwable> new_exception) {
+ CHECK(new_exception != nullptr);
+ if (kIsDebugBuild) {
+ // Make sure we are in a checkpoint.
+ MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
+ CHECK(this == Thread::Current() || GetSuspendCount() >= 1)
+ << "It doesn't look like this was called in a checkpoint! this: "
+ << this << " count: " << GetSuspendCount();
+ }
+ tlsPtr_.async_exception = new_exception.Ptr();
+}
+
+bool Thread::ObserveAsyncException() {
+ DCHECK(this == Thread::Current());
+ if (tlsPtr_.async_exception != nullptr) {
+ if (tlsPtr_.exception != nullptr) {
+ LOG(WARNING) << "Overwriting pending exception with async exception. Pending exception is: "
+ << tlsPtr_.exception->Dump();
+ LOG(WARNING) << "Async exception is " << tlsPtr_.async_exception->Dump();
+ }
+ tlsPtr_.exception = tlsPtr_.async_exception;
+ tlsPtr_.async_exception = nullptr;
+ return true;
+ } else {
+ return IsExceptionPending();
+ }
+}
+
void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
CHECK(new_exception != nullptr);
// TODO: DCHECK(!IsExceptionPending());
diff --git a/runtime/thread.h b/runtime/thread.h
index 2e4a3da..ab89778 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -401,6 +401,10 @@
return tlsPtr_.exception != nullptr;
}
+ bool IsAsyncExceptionPending() const {
+ return tlsPtr_.async_exception != nullptr;
+ }
+
mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
@@ -412,10 +416,24 @@
void SetException(ObjPtr<mirror::Throwable> new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Set an exception that is asynchronously thrown from a different thread. This will be checked
+ // periodically and might overwrite the current 'Exception'. This can only be called from a
+ // checkpoint.
+ //
+ // The caller should also make sure that the thread has been deoptimized so that the exception
+ // can be detected on back-edges.
+ void SetAsyncException(ObjPtr<mirror::Throwable> new_exception)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
}
+ // Move the current async-exception to the main exception. This should be called when the
+ // current thread is ready to deal with any async exceptions. Returns true if an exception
+ // (async or otherwise) is now pending on the thread, false otherwise.
+ bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
+
// Find catch block and perform long jump to appropriate exception handler.
NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1518,7 +1536,8 @@
thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr),
- flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) {
+ flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
+ async_exception(nullptr) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
@@ -1675,6 +1694,9 @@
// Thread-local mark stack for the concurrent copying collector.
gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack;
+
+ // The pending async-exception or null.
+ mirror::Throwable* async_exception;
} tlsPtr_;
// Guards the 'wait_monitor_' members.
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index fb77b84..cffaffc 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -31,6 +31,7 @@
#include "base/time_utils.h"
#include "runtime.h"
#include "thread-current-inl.h"
+#include "utils.h"
namespace art {
@@ -49,8 +50,11 @@
false, false, &error_msg));
CHECK(stack_.get() != nullptr) << error_msg;
CHECK_ALIGNED(stack_->Begin(), kPageSize);
- int mprotect_result = mprotect(stack_->Begin(), kPageSize, PROT_NONE);
- CHECK_EQ(mprotect_result, 0) << "Failed to mprotect() bottom page of thread pool worker stack.";
+ CheckedCall(mprotect,
+ "mprotect bottom page of thread pool worker stack",
+ stack_->Begin(),
+ kPageSize,
+ PROT_NONE);
const char* reason = "new thread pool worker thread";
pthread_attr_t attr;
CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), reason);
diff --git a/runtime/utils.h b/runtime/utils.h
index 4cb06c1..fbf812a 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -338,6 +338,15 @@
// Madvise the largest page aligned region within begin and end.
int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
+template <typename Func, typename... Args>
+static inline void CheckedCall(const Func& function, const char* what, Args... args) {
+ int rc = function(args...);
+ if (UNLIKELY(rc != 0)) {
+ errno = rc;
+ PLOG(FATAL) << "Checked call failed for " << what;
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
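
A compilable, standalone use of the CheckedCall() template added above, with perror/abort standing in for PLOG(FATAL). One caveat worth knowing: 'errno = rc' fits pthreads-style APIs that return an error code, whereas mprotect() returns -1 and reports the cause through errno itself, so for mprotect the logged errno text is not the real failure reason:

#include <sys/mman.h>

#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

template <typename Func, typename... Args>
static inline void CheckedCall(const Func& function, const char* what, Args... args) {
  int rc = function(args...);
  if (rc != 0) {
    errno = rc;
    perror(what);  // stand-in for PLOG(FATAL)
    abort();
  }
}

int main() {
  void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    perror("mmap");
    return 1;
  }
  // Same shape as the jit_code_cache.cc call sites above.
  CheckedCall(mprotect, "mprotect demo page", page, size_t{4096}, PROT_READ);
  return 0;
}
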
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index cfdf20d..7246bae 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -43,6 +43,7 @@
#include "mirror/class.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/method_handle_impl.h"
+#include "mirror/method_type.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "reg_type-inl.h"
@@ -431,7 +432,7 @@
}
}
if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
- method->AddAccessFlags(kAccMustCountLocks);
+ method->SetMustCountLocks();
}
}
} else {
@@ -1169,6 +1170,15 @@
case Instruction::kVerifyRegBWide:
result = result && CheckWideRegisterIndex(inst->VRegB());
break;
+ case Instruction::kVerifyRegBCallSite:
+ result = result && CheckCallSiteIndex(inst->VRegB());
+ break;
+ case Instruction::kVerifyRegBMethodHandle:
+ result = result && CheckMethodHandleIndex(inst->VRegB());
+ break;
+ case Instruction::kVerifyRegBPrototype:
+ result = result && CheckPrototypeIndex(inst->VRegB());
+ break;
}
switch (inst->GetVerifyTypeArgumentC()) {
case Instruction::kVerifyRegC:
@@ -1260,6 +1270,16 @@
return true;
}
+inline bool MethodVerifier::CheckCallSiteIndex(uint32_t idx) {
+ uint32_t limit = dex_file_->NumCallSiteIds();
+ if (UNLIKELY(idx >= limit)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad call site index " << idx << " (max "
+ << limit << ")";
+ return false;
+ }
+ return true;
+}
+
inline bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
if (UNLIKELY(idx >= dex_file_->GetHeader().field_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
@@ -1278,6 +1298,16 @@
return true;
}
+inline bool MethodVerifier::CheckMethodHandleIndex(uint32_t idx) {
+ uint32_t limit = dex_file_->NumMethodHandles();
+ if (UNLIKELY(idx >= limit)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method handle index " << idx << " (max "
+ << limit << ")";
+ return false;
+ }
+ return true;
+}
+
inline bool MethodVerifier::CheckNewInstance(dex::TypeIndex idx) {
if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
@@ -2320,6 +2350,18 @@
: reg_types_.JavaLangClass());
break;
}
+ case Instruction::CONST_METHOD_HANDLE:
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21c(), reg_types_.JavaLangInvokeMethodHandle());
+ // TODO: add compiler support for const-method-{handle,type} (b/66890674)
+ Fail(VERIFY_ERROR_FORCE_INTERPRETER);
+ break;
+ case Instruction::CONST_METHOD_TYPE:
+ work_line_->SetRegisterType<LockOp::kClear>(
+ this, inst->VRegA_21c(), reg_types_.JavaLangInvokeMethodType());
+ // TODO: add compiler support for const-method-{handle,type} (b/66890674)
+ Fail(VERIFY_ERROR_FORCE_INTERPRETER);
+ break;
case Instruction::MONITOR_ENTER:
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
// Check whether the previous instruction is a move-object with vAA as a source, creating
@@ -3454,7 +3496,6 @@
/* These should never appear during verification. */
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
case Instruction::UNUSED_F3 ... Instruction::UNUSED_F9:
- case Instruction::UNUSED_FE ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index da4102a..57ab56c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -221,7 +221,7 @@
return IsConstructor() && !IsStatic();
}
- ScopedArenaAllocator& GetArena() {
+ ScopedArenaAllocator& GetScopedAllocator() {
return arena_;
}
@@ -404,6 +404,10 @@
/* Ensure that the wide register index is valid for this code item. */
bool CheckWideRegisterIndex(uint32_t idx);
+ // Perform static checks on an instruction referencing a CallSite. All we do here is ensure that
+ // the call site index is in the valid range.
+ bool CheckCallSiteIndex(uint32_t idx);
+
// Perform static checks on a field Get or set instruction. All we do here is ensure that the
// field index is in the valid range.
bool CheckFieldIndex(uint32_t idx);
@@ -412,6 +416,10 @@
// method index is in the valid range.
bool CheckMethodIndex(uint32_t idx);
+ // Perform static checks on an instruction referencing a constant method handle. All we do here
+ // is ensure that the method handle index is in the valid range.
+ bool CheckMethodHandleIndex(uint32_t idx);
+
// Perform static checks on a "new-instance" instruction. Specifically, make sure the class
// reference isn't for an array class.
bool CheckNewInstance(dex::TypeIndex idx);
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index 704d2a8..631c6bd 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -199,8 +199,8 @@
return instance_;
}
-inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(size, kArenaAllocMisc);
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(size, kArenaAllocMisc);
}
} // namespace verifier
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c5d8ff5..a2085a3 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -264,8 +264,8 @@
return ::operator new(size);
}
- static void* operator new(size_t size, ArenaAllocator* arena) = delete;
- static void* operator new(size_t size, ScopedArenaAllocator* arena);
+ static void* operator new(size_t size, ArenaAllocator* allocator) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator);
enum class AssignmentType {
kBoolean,
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b57a2c8..197c976 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -19,6 +19,8 @@
#include "class_linker.h"
#include "mirror/class-inl.h"
+#include "mirror/method_handle_impl.h"
+#include "mirror/method_type.h"
#include "mirror/string.h"
#include "mirror/throwable.h"
#include "reg_type.h"
@@ -131,6 +133,20 @@
return *down_cast<const PreciseReferenceType*>(result);
}
+inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() {
+ const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;",
+ mirror::MethodHandle::StaticClass(), true);
+ DCHECK(result->IsPreciseReference());
+ return *down_cast<const PreciseReferenceType*>(result);
+}
+
+inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() {
+ const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;",
+ mirror::MethodType::StaticClass(), true);
+ DCHECK(result->IsPreciseReference());
+ return *down_cast<const PreciseReferenceType*>(result);
+}
+
inline const RegType& RegTypeCache::JavaLangThrowable(bool precise) {
const RegType* result = &FromClass("Ljava/lang/Throwable;",
mirror::Throwable::GetJavaLangThrowable(), precise);
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 7077c55..96eca05 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -126,6 +126,8 @@
const PreciseReferenceType& JavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_);
const PreciseReferenceType& JavaLangString() REQUIRES_SHARED(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangInvokeMethodHandle() REQUIRES_SHARED(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangInvokeMethodType() REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& JavaLangThrowable(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& JavaLangObject(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 3da1680..a9c9428 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -178,14 +178,15 @@
}
inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
- void* memory = verifier->GetArena().Alloc(ComputeSize(num_regs));
+ void* memory = verifier->GetScopedAllocator().Alloc(ComputeSize(num_regs));
return new (memory) RegisterLine(num_regs, verifier);
}
inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
: num_regs_(num_regs),
- monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
- reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ monitors_(verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)),
+ reg_to_lock_depths_(std::less<uint32_t>(),
+ verifier->GetScopedAllocator().Adapter(kArenaAllocVerifier)),
this_initialized_(false) {
std::uninitialized_fill_n(line_, num_regs_, 0u);
SetResultTypeToUnknown(verifier);
diff --git a/test/059-finalizer-throw/expected.txt b/test/059-finalizer-throw/expected.txt
index cbc9ece..f19011b 100644
--- a/test/059-finalizer-throw/expected.txt
+++ b/test/059-finalizer-throw/expected.txt
@@ -1,2 +1,3 @@
+Starting
In finalizer
done
diff --git a/test/059-finalizer-throw/src/Main.java b/test/059-finalizer-throw/src/Main.java
index 3bfbc2d..da5190a 100644
--- a/test/059-finalizer-throw/src/Main.java
+++ b/test/059-finalizer-throw/src/Main.java
@@ -25,11 +25,18 @@
static Object waiter = new Object();
static volatile boolean didFinal = false;
+ private static volatile Throwable preallocatedException;
+
static void createAndForget() {
Main main = new Main();
}
public static void main(String[] args) {
+ // Preallocate exception to lighten the load in the time-sensitive section.
+ preallocatedException = new InterruptedException("whee");
+ // Print something up front so the timing-sensitive code below does not pay the one-time
+ // cost of the first write.
+ System.out.println("Starting");
+
createAndForget();
System.gc();
@@ -65,6 +72,6 @@
didFinal = true;
- throw new InterruptedException("whee");
+ throw preallocatedException;
}
}
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index 3f7bb56..3016187 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -40,6 +40,8 @@
ensureJitCompiled(Main.class, "notExcessiveNesting");
ensureJitCompiled(Main.class, "notNested");
ensureJitCompiled(TwoPath.class, "twoPath");
+ ensureJitCompiled(Class.forName("OK"), "runNoMonitors");
+ ensureJitCompiled(Class.forName("OK"), "runStraightLine");
ensureJitCompiled(Class.forName("OK"), "runBalancedJoin");
Main m = new Main();
diff --git a/test/1914-get-local-instance/expected.txt b/test/1914-get-local-instance/expected.txt
index 4117942..09f0df1 100644
--- a/test/1914-get-local-instance/expected.txt
+++ b/test/1914-get-local-instance/expected.txt
@@ -10,3 +10,6 @@
Running public native void art.Test1914$TargetClass.NativeInstanceMethod(java.lang.Runnable) with "GetThis" on remote thread.
"GetThis" on public native void art.Test1914$TargetClass.NativeInstanceMethod(java.lang.Runnable) got value: TargetClass("NativeInstanceMethodObject")
Value is 'TargetClass("NativeInstanceMethodObject")' (class: class art.Test1914$TargetClass)
+Running public abstract void art.Test1914$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetThis" on remote thread.
+"GetThis" on public abstract void art.Test1914$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: Proxy for [interface art.Test1914$Foo]
+ Value is 'Proxy for [interface art.Test1914$Foo]' (class: PROXY CLASS)
diff --git a/test/1914-get-local-instance/src/art/Test1914.java b/test/1914-get-local-instance/src/art/Test1914.java
index c09f519..e47f9cb 100644
--- a/test/1914-get-local-instance/src/art/Test1914.java
+++ b/test/1914-get-local-instance/src/art/Test1914.java
@@ -18,7 +18,9 @@
import java.lang.reflect.Constructor;
import java.lang.reflect.Executable;
+import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
import java.nio.ByteBuffer;
import java.util.concurrent.Semaphore;
import java.util.Arrays;
@@ -35,7 +37,7 @@
public static void reportValue(Object val) {
System.out.println("\tValue is '" + val + "' (class: "
- + (val != null ? val.getClass() : "NULL") + ")");
+ + (val != null ? (val instanceof Proxy ? "PROXY CLASS" : val.getClass()) : "NULL") + ")");
}
public static void StaticMethod(Runnable safepoint) {
@@ -151,7 +153,10 @@
private StackTrace.StackFrameData findStackFrame(Thread thr) {
for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
- if (frame.method.equals(target)) {
+ if (frame.method.equals(target) ||
+ (frame.method.getName().equals(target.getName()) &&
+ Arrays.deepEquals(frame.method.getParameterTypes(), target.getParameterTypes()) &&
+ ((Method)frame.method).getReturnType().equals(target.getReturnType()))) {
return frame;
}
}
@@ -163,6 +168,25 @@
return klass.getDeclaredMethod(name, Runnable.class);
}
+ public static interface Foo {
+ public void InterfaceProxyMethod(Runnable r);
+ }
+
+ public static Object getProxyObject(final Class... k) {
+ return Proxy.newProxyInstance(
+ Test1914.class.getClassLoader(),
+ k,
+ (p, m, a) -> {
+ if (m.getName().equals("toString")) {
+ return "Proxy for " + Arrays.toString(k);
+ } else {
+ ((Runnable)a[0]).run();
+ reportValue(p);
+ return null;
+ }
+ });
+ }
+
public static void run() throws Exception {
Locals.EnableLocalVariableAccess();
final TestCase[] MAIN_TEST_CASES = new TestCase[] {
@@ -172,6 +196,8 @@
getMethod(TargetClass.class, "InstanceMethod")),
new TestCase(new TargetClass("NativeInstanceMethodObject"),
getMethod(TargetClass.class, "NativeInstanceMethod")),
+ new TestCase(getProxyObject(Foo.class),
+ getMethod(Foo.class, "InterfaceProxyMethod")),
};
for (TestCase t: MAIN_TEST_CASES) {
diff --git a/test/1923-frame-pop/src/art/Trace.java b/test/1923-frame-pop/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/1923-frame-pop/src/art/Trace.java
+++ b/test/1923-frame-pop/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+  // The names, arguments, and even line numbers of these functions are embedded in the tests, so
+  // we need to add new methods at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/1924-frame-pop-toggle/src/art/Trace.java b/test/1924-frame-pop-toggle/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/1924-frame-pop-toggle/src/art/Trace.java
+++ b/test/1924-frame-pop-toggle/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+  // The names, arguments, and even line numbers of these functions are embedded in the tests, so
+  // we need to add new methods at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/1925-self-frame-pop/src/art/Trace.java b/test/1925-self-frame-pop/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/1925-self-frame-pop/src/art/Trace.java
+++ b/test/1925-self-frame-pop/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+  // The names, arguments, and even line numbers of these functions are embedded in the tests, so
+  // we need to add new methods at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/1926-missed-frame-pop/src/art/Trace.java b/test/1926-missed-frame-pop/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/1926-missed-frame-pop/src/art/Trace.java
+++ b/test/1926-missed-frame-pop/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+  // The names, arguments, and even line numbers of these functions are embedded in the tests, so
+  // we need to add new methods at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/1929-exception-catch-exception/build b/test/1929-exception-catch-exception/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/1929-exception-catch-exception/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/1934-jvmti-signal-thread/expected.txt b/test/1934-jvmti-signal-thread/expected.txt
new file mode 100644
index 0000000..69a0e9e
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/expected.txt
@@ -0,0 +1,27 @@
+Interrupt before start
+interrupting other thread before starting
+Caught exception java.lang.RuntimeException: JVMTI_ERROR_THREAD_NOT_ALIVE
+Stop before start
+stopping other thread before starting
+Caught exception java.lang.RuntimeException: JVMTI_ERROR_THREAD_NOT_ALIVE
+Interrupt recur
+Interrupting other thread recurring
+Other thread Interrupted. err: java.lang.Error: Interrupted!
+Stop Recur
+stopping other thread recurring
+Other thread Stopped by: java.lang.Error: AWESOME!
+Interrupt spinning
+Interrupting other thread spinning
+Other thread Interrupted.
+Stop spinning
+stopping other thread spinning
+Other thread Stopped by: java.lang.Error: AWESOME!
+Interrupt wait
+interrupting other thread waiting
+Other thread interrupted. err: java.lang.Error: Interrupted!
+Stop wait
+stopping other thread waiting
+Other thread Stopped by: java.lang.Error: AWESOME
+Stop in native
+stopping other thread
+Other thread Stopped by: java.lang.Error: AWESOME
diff --git a/test/1934-jvmti-signal-thread/info.txt b/test/1934-jvmti-signal-thread/info.txt
new file mode 100644
index 0000000..c8c9189
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that the jvmti StopThread and InterruptThread functions work as expected.
diff --git a/test/1934-jvmti-signal-thread/run b/test/1934-jvmti-signal-thread/run
new file mode 100755
index 0000000..e92b873
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc
new file mode 100644
index 0000000..726a7a86
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/signal_threads.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <pthread.h>
+
+#include <cstdio>
+#include <iostream>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "jni.h"
+#include "jvmti.h"
+
+#include "scoped_local_ref.h"
+#include "scoped_primitive_array.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1934SignalThreads {
+
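+// Handshake state shared between two threads: one thread parks in
+// nativeWaitForOtherThread() until nativeDoInterleaved() has run its closure
+// on the other thread and signals it to continue.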
+struct NativeMonitor {
+ jrawMonitorID continue_monitor;
+ bool should_continue;
+ jrawMonitorID start_monitor;
+ bool should_start;
+};
+
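+// Allocates a NativeMonitor and its two raw monitors through jvmti. Returns
+// the pointer as a jlong handle, or -1 if an error was turned into an exception.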
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test1934_allocNativeMonitor(JNIEnv* env, jclass) {
+ NativeMonitor* mon;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->Allocate(sizeof(NativeMonitor),
+ reinterpret_cast<unsigned char**>(&mon)))) {
+ return -1l;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->CreateRawMonitor("test-1934 start",
+ &mon->start_monitor))) {
+ return -1l;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->CreateRawMonitor("test-1934 continue",
+ &mon->continue_monitor))) {
+ return -1l;
+ }
+ mon->should_continue = false;
+ mon->should_start = false;
+ return static_cast<jlong>(reinterpret_cast<intptr_t>(mon));
+}
+
+extern "C" JNIEXPORT void Java_art_Test1934_nativeWaitForOtherThread(JNIEnv* env,
+ jclass,
+ jlong id) {
+ NativeMonitor* mon = reinterpret_cast<NativeMonitor*>(static_cast<intptr_t>(id));
+ // Start
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(mon->start_monitor))) {
+ return;
+ }
+ mon->should_start = true;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->RawMonitorNotifyAll(mon->start_monitor))) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->start_monitor));
+ return;
+ }
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->start_monitor))) {
+ return;
+ }
+
+ // Finish
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(mon->continue_monitor))) {
+ return;
+ }
+ while (!mon->should_continue) {
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
+ return;
+ }
+ }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
+}
+
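+// Waits for the other thread to signal the start handshake, runs |closure|,
+// and then tells the waiting thread to continue.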
+extern "C" JNIEXPORT void Java_art_Test1934_nativeDoInterleaved(JNIEnv* env,
+ jclass,
+ jlong id,
+ jobject closure) {
+ NativeMonitor* mon = reinterpret_cast<NativeMonitor*>(static_cast<intptr_t>(id));
+ // Wait for start.
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(mon->start_monitor))) {
+ return;
+ }
+ while (!mon->should_start) {
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) {
+ return;
+ }
+ }
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->start_monitor))) {
+ return;
+ }
+
+ // Call closure.
+ ScopedLocalRef<jclass> runnable_klass(env, env->FindClass("java/lang/Runnable"));
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ jmethodID doRun = env->GetMethodID(runnable_klass.get(), "run", "()V");
+ if (env->ExceptionCheck()) {
+ return;
+ }
+ env->CallVoidMethod(closure, doRun);
+
+ // Tell other thread to finish.
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(mon->continue_monitor))) {
+ return;
+ }
+ mon->should_continue = true;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->RawMonitorNotifyAll(mon->continue_monitor))) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
+ return;
+ }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
+}
+
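+// Destroys both raw monitors and frees the NativeMonitor allocation.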
+extern "C" JNIEXPORT void Java_art_Test1934_destroyNativeMonitor(JNIEnv*, jclass, jlong id) {
+ NativeMonitor* mon = reinterpret_cast<NativeMonitor*>(static_cast<intptr_t>(id));
+ jvmti_env->DestroyRawMonitor(mon->start_monitor);
+ jvmti_env->DestroyRawMonitor(mon->continue_monitor);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(mon));
+}
+
+} // namespace Test1934SignalThreads
+} // namespace art
+
diff --git a/test/1934-jvmti-signal-thread/src/Main.java b/test/1934-jvmti-signal-thread/src/Main.java
new file mode 100644
index 0000000..539763c
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1934.run();
+ }
+}
diff --git a/test/1934-jvmti-signal-thread/src/art/Monitors.java b/test/1934-jvmti-signal-thread/src/art/Monitors.java
new file mode 100644
index 0000000..7fe2b60
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Monitors.java
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+import java.util.concurrent.atomic.*;
+import java.util.function.Function;
+import java.util.stream.Stream;
+import java.util.Arrays;
+import java.util.Objects;
+
+public class Monitors {
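+  // Installs jvmti monitor event callbacks (contended enter/entered, wait and
+  // waited) that dispatch to the given methods for the given thread.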
+ public native static void setupMonitorEvents(
+ Class<?> method_klass,
+ Method monitor_contended_enter_event,
+ Method monitor_contended_entered_event,
+ Method monitor_wait_event,
+ Method monitor_waited_event,
+ Class<?> lock_klass,
+ Thread thr);
+ public native static void stopMonitorEvents();
+
+ public static class NamedLock {
+ public final String name;
+ private volatile int calledNotify;
+ public NamedLock(String name) {
+ this.name = name;
+ calledNotify = 0;
+ }
+
+ public String toString() {
+ return String.format("NamedLock[%s]", name);
+ }
+
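+    // Waits until DoNotify/DoNotifyAll bumps calledNotify, tolerating spurious wakeups.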
+ public final void DoWait() throws Exception {
+ final int v = calledNotify;
+ while (v == calledNotify) {
+ wait();
+ }
+ }
+
+ public final void DoWait(long t) throws Exception {
+ final int v = calledNotify;
+ final long target = System.currentTimeMillis() + (t / 2);
+ while (v == calledNotify && (t < 0 || System.currentTimeMillis() < target)) {
+ wait(t);
+ }
+ }
+
+ public final void DoNotifyAll() throws Exception {
+ calledNotify++;
+ notifyAll();
+ }
+
+ public final void DoNotify() throws Exception {
+ calledNotify++;
+ notify();
+ }
+ }
+
+ public static final class MonitorUsage {
+ public final Object monitor;
+ public final Thread owner;
+ public final int entryCount;
+ public final Thread[] waiters;
+ public final Thread[] notifyWaiters;
+
+ public MonitorUsage(
+ Object monitor,
+ Thread owner,
+ int entryCount,
+ Thread[] waiters,
+ Thread[] notifyWaiters) {
+ this.monitor = monitor;
+ this.entryCount = entryCount;
+ this.owner = owner;
+ this.waiters = waiters;
+ this.notifyWaiters = notifyWaiters;
+ }
+
+ private static String toNameList(Thread[] ts) {
+ return Arrays.toString(Arrays.stream(ts).map((Thread t) -> t.getName()).toArray());
+ }
+
+ public String toString() {
+ return String.format(
+ "MonitorUsage{ monitor: %s, owner: %s, entryCount: %d, waiters: %s, notify_waiters: %s }",
+ monitor,
+ (owner != null) ? owner.getName() : "<NULL>",
+ entryCount,
+ toNameList(waiters),
+ toNameList(notifyWaiters));
+ }
+ }
+
+ public static native MonitorUsage getObjectMonitorUsage(Object monitor);
+ public static native Object getCurrentContendedMonitor(Thread thr);
+
+ public static class TestException extends Error {
+ public TestException() { super(); }
+ public TestException(String s) { super(s); }
+ public TestException(String s, Throwable c) { super(s, c); }
+ }
+
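+  // Drives a worker thread that holds |lock| and performs requested actions on
+  // demand. A typical sequence (a sketch, not taken from any one test):
+  //   LockController lc = new LockController(lock);
+  //   lc.DoLock();
+  //   lc.waitForLockToBeHeld();
+  //   lc.DoNotifyAll();
+  //   lc.waitForActionToFinish();
+  //   lc.DoUnlock();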
+ public static class LockController {
+ private static enum Action { HOLD, RELEASE, NOTIFY, NOTIFY_ALL, WAIT, TIMED_WAIT }
+
+ public final NamedLock lock;
+ public final long timeout;
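+    // The currently requested action. The stamp is bumped on every transition so
+    // setAction() can wait for the previous action to drain back to HOLD safely.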
+ private final AtomicStampedReference<Action> action;
+ private volatile Thread runner = null;
+ private volatile boolean started = false;
+ private volatile boolean held = false;
+ private static final AtomicInteger cnt = new AtomicInteger(0);
+ private volatile Throwable exe;
+
+ public LockController(NamedLock lock) {
+ this(lock, 10 * 1000);
+ }
+ public LockController(NamedLock lock, long timeout) {
+ this.lock = lock;
+ this.timeout = timeout;
+      this.action = new AtomicStampedReference<>(Action.HOLD, 0);
+ this.exe = null;
+ }
+
+ public boolean IsWorkerThread(Thread thd) {
+ return Objects.equals(runner, thd);
+ }
+
+ public boolean IsLocked() {
+ checkException();
+ return held;
+ }
+
+ public void checkException() {
+ if (exe != null) {
+ throw new TestException("Exception thrown by other thread!", exe);
+ }
+ }
+
+ private void setAction(Action a) {
+ int stamp = action.getStamp();
+ // Wait for it to be HOLD before updating.
+ while (!action.compareAndSet(Action.HOLD, a, stamp, stamp + 1)) {
+ stamp = action.getStamp();
+ }
+ }
+
+ public synchronized void suspendWorker() throws Exception {
+ checkException();
+ if (runner == null) {
+ throw new TestException("We don't have any runner holding " + lock);
+ }
+ Suspension.suspend(runner);
+ }
+
+ public Object getWorkerContendedMonitor() throws Exception {
+ checkException();
+ if (runner == null) {
+ return null;
+ }
+ return getCurrentContendedMonitor(runner);
+ }
+
+ public synchronized void DoLock() {
+ if (IsLocked()) {
+ throw new Error("lock is already acquired or being acquired.");
+ }
+ if (runner != null) {
+ throw new Error("Already have thread!");
+ }
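+      // Spawn a worker that takes the lock and then services requested actions until RELEASE.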
+ runner = new Thread(() -> {
+ started = true;
+ try {
+ synchronized (lock) {
+ held = true;
+ int[] stamp_h = new int[] { -1 };
+ Action cur_action = Action.HOLD;
+ try {
+ while (true) {
+ cur_action = action.get(stamp_h);
+ int stamp = stamp_h[0];
+ if (cur_action == Action.RELEASE) {
+              // The other thread will deal with resetting the action.
+ break;
+ }
+ try {
+ switch (cur_action) {
+ case HOLD:
+ Thread.yield();
+ break;
+ case NOTIFY:
+ lock.DoNotify();
+ break;
+ case NOTIFY_ALL:
+ lock.DoNotifyAll();
+ break;
+ case TIMED_WAIT:
+ lock.DoWait(timeout);
+ break;
+ case WAIT:
+ lock.DoWait();
+ break;
+ default:
+                  throw new Error("Unknown action " + cur_action);
+ }
+ } finally {
+              // Reset action back to HOLD unless it has already been changed to something else.
+ action.compareAndSet(cur_action, Action.HOLD, stamp, stamp+1);
+ }
+ }
+ } catch (Exception e) {
+ throw new TestException("Got an error while performing action " + cur_action, e);
+ }
+ }
+ } finally {
+ held = false;
+ started = false;
+ }
+ }, "Locker thread " + cnt.getAndIncrement() + " for " + lock);
+ // Make sure we can get any exceptions this throws.
+ runner.setUncaughtExceptionHandler((t, e) -> { exe = e; });
+ runner.start();
+ }
+
+ public void waitForLockToBeHeld() throws Exception {
+ while (true) {
+ if (IsLocked() && Objects.equals(runner, Monitors.getObjectMonitorUsage(lock).owner)) {
+ return;
+ }
+ }
+ }
+
+ public synchronized void waitForNotifySleep() throws Exception {
+ if (runner == null) {
+ throw new Error("No thread trying to lock!");
+ }
+ do {
+ checkException();
+ } while (!started ||
+ !Arrays.asList(Monitors.getObjectMonitorUsage(lock).notifyWaiters).contains(runner));
+ }
+
+ public synchronized void waitForContendedSleep() throws Exception {
+ if (runner == null) {
+ throw new Error("No thread trying to lock!");
+ }
+ do {
+ checkException();
+ } while (!started ||
+ runner.getState() != Thread.State.BLOCKED ||
+ !Arrays.asList(Monitors.getObjectMonitorUsage(lock).waiters).contains(runner));
+ }
+
+ public synchronized void DoNotify() {
+ if (!IsLocked()) {
+ throw new Error("Not locked");
+ }
+ setAction(Action.NOTIFY);
+ }
+
+ public synchronized void DoNotifyAll() {
+ if (!IsLocked()) {
+ throw new Error("Not locked");
+ }
+ setAction(Action.NOTIFY_ALL);
+ }
+
+ public synchronized void DoTimedWait() throws Exception {
+ if (!IsLocked()) {
+ throw new Error("Not locked");
+ }
+ setAction(Action.TIMED_WAIT);
+ }
+
+ public synchronized void DoWait() throws Exception {
+ if (!IsLocked()) {
+ throw new Error("Not locked");
+ }
+ setAction(Action.WAIT);
+ }
+
+ public synchronized void interruptWorker() throws Exception {
+ if (!IsLocked()) {
+ throw new Error("Not locked");
+ }
+ runner.interrupt();
+ }
+
+ public synchronized void waitForActionToFinish() throws Exception {
+ checkException();
+ while (action.getReference() != Action.HOLD) { checkException(); }
+ }
+
+ public synchronized void DoUnlock() throws Exception {
+ Error throwing = null;
+ if (!IsLocked()) {
+        // We might just be racing an exception that was thrown by the worker thread. Cache this
+        // error; any exception from the worker will be thrown before it.
+ throwing = new Error("Not locked!");
+ }
+ setAction(Action.RELEASE);
+ Thread run = runner;
+ runner = null;
+ while (held) {}
+ run.join();
+ action.set(Action.HOLD, 0);
+      // Make sure to throw any exception that occurred, since the worker might not have unlocked
+      // in response to our request.
+ checkException();
+ DoCleanup();
+ if (throwing != null) {
+ throw throwing;
+ }
+ }
+
+ public synchronized void DoCleanup() throws Exception {
+ if (runner != null) {
+ Thread run = runner;
+ runner = null;
+ while (held) {}
+ run.join();
+ }
+ action.set(Action.HOLD, 0);
+ exe = null;
+ }
+ }
+}
+
diff --git a/test/1934-jvmti-signal-thread/src/art/Suspension.java b/test/1934-jvmti-signal-thread/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1934-jvmti-signal-thread/src/art/Test1934.java b/test/1934-jvmti-signal-thread/src/art/Test1934.java
new file mode 100644
index 0000000..552570a
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Test1934.java
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.concurrent.Semaphore;
+import java.util.Objects;
+
+public class Test1934 {
+ public static final boolean PRINT_STACK_TRACE = false;
+
+ public static void run() throws Exception {
+ System.out.println("Interrupt before start");
+ testInterruptBeforeStart();
+
+ System.out.println("Stop before start");
+ testStopBeforeStart();
+
+ System.out.println("Interrupt recur");
+ testInterruptRecur();
+
+ System.out.println("Stop Recur");
+ testStopRecur();
+
+ System.out.println("Interrupt spinning");
+ testInterruptSpinning();
+
+ System.out.println("Stop spinning");
+ testStopSpinning();
+
+ System.out.println("Interrupt wait");
+ testInterruptWait();
+
+ System.out.println("Stop wait");
+ testStopWait();
+
+ System.out.println("Stop in native");
+ testStopInNative();
+ }
+
+ public static void testStopBeforeStart() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Object tst = new Object();
+ Thread target = new Thread(() -> { while (true) { } }, "waiting thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ System.out.println("stopping other thread before starting");
+ try {
+ Threads.stopThread(target, new Error("AWESOME"));
+ target.start();
+ target.join();
+ System.out.println("Other thread Stopped by: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ } catch (Exception e) {
+ System.out.println("Caught exception " + e);
+ }
+ }
+
+ public static void testInterruptBeforeStart() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Object tst = new Object();
+ Thread target = new Thread(() -> { while (true) { } }, "waiting thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ System.out.println("interrupting other thread before starting");
+ try {
+ Threads.interruptThread(target);
+ target.start();
+ target.join();
+ System.out.println("Other thread interrupted. err: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ } catch (Exception e) {
+ System.out.println("Caught exception " + e);
+ }
+ }
+
+ public static void testStopWait() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Object tst = new Object();
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ while (true) {
+ try {
+ synchronized (tst) {
+ tst.wait();
+ }
+ } catch (InterruptedException e) { throw new Error("Interrupted!", e); }
+ }
+ }, "waiting thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ while (!Objects.equals(Monitors.getCurrentContendedMonitor(target), tst)) {}
+ System.out.println("stopping other thread waiting");
+ Threads.stopThread(target, new Error("AWESOME"));
+ target.join();
+ System.out.println("Other thread Stopped by: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ }
+
+ public static void testInterruptWait() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Object tst = new Object();
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ while (true) {
+ try {
+ synchronized (tst) {
+ tst.wait();
+ }
+ } catch (InterruptedException e) { throw new Error("Interrupted!", e); }
+ }
+ }, "waiting thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ while (!Objects.equals(Monitors.getCurrentContendedMonitor(target), tst)) {}
+ System.out.println("interrupting other thread waiting");
+ Threads.interruptThread(target);
+ target.join();
+ System.out.println("Other thread interrupted. err: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ }
+
+ public static void doNothing() {}
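+  // Native entry points into the monitor handshake implemented in signal_threads.cc.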
+ public static native long allocNativeMonitor();
+ public static native void nativeWaitForOtherThread(long id);
+ public static native void nativeDoInterleaved(long id, Runnable op);
+ public static native void destroyNativeMonitor(long id);
+ public static void testStopInNative() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final long native_monitor_id = allocNativeMonitor();
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ nativeWaitForOtherThread(native_monitor_id);
+      // We need to do something afterwards so that the pending exception is actually noticed.
+ doNothing();
+ }, "native waiting thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ System.out.println("stopping other thread");
+ nativeDoInterleaved(
+ native_monitor_id,
+ () -> { Threads.stopThread(target, new Error("AWESOME")); });
+ target.join();
+ System.out.println("Other thread Stopped by: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ destroyNativeMonitor(native_monitor_id);
+ }
+
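+  // Recurses forever so the target thread repeatedly hits StackOverflowError.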
+ public static void doRecur(Runnable r) {
+ if (r != null) {
+ r.run();
+ }
+ doRecur(r);
+ }
+
+ public static void testStopRecur() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ while (true) {
+ try {
+ doRecur(null);
+ } catch (StackOverflowError e) {}
+ }
+    }, "recurring thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ System.out.println("stopping other thread recurring");
+ Threads.stopThread(target, new Error("AWESOME!"));
+ target.join();
+ System.out.println("Other thread Stopped by: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ }
+
+ public static void testInterruptRecur() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ while (true) {
+ try {
+ doRecur(() -> {
+ if (Thread.currentThread().isInterrupted()) { throw new Error("Interrupted!"); }
+ });
+ } catch (StackOverflowError e) { }
+ }
+    }, "recurring thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ System.out.println("Interrupting other thread recurring");
+ Threads.interruptThread(target);
+ target.join();
+ System.out.println("Other thread Interrupted. err: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ }
+
+ public static void testStopSpinning() throws Exception {
+ final Throwable[] out_err = new Throwable[] { null, };
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> { sem.release(); while (true) {} }, "Spinning thread!");
+ target.setUncaughtExceptionHandler((t, e) -> { out_err[0] = e; });
+ target.start();
+ sem.acquire();
+ System.out.println("stopping other thread spinning");
+ Threads.stopThread(target, new Error("AWESOME!"));
+ target.join();
+ System.out.println("Other thread Stopped by: " + out_err[0]);
+ if (PRINT_STACK_TRACE && out_err[0] != null) {
+ out_err[0].printStackTrace();
+ }
+ }
+
+ public static void testInterruptSpinning() throws Exception {
+ final Semaphore sem = new Semaphore(0);
+ Thread target = new Thread(() -> {
+ sem.release();
+ while (!Thread.currentThread().isInterrupted()) { }
+ }, "Spinning thread!");
+ target.start();
+ sem.acquire();
+ System.out.println("Interrupting other thread spinning");
+ Threads.interruptThread(target);
+ target.join();
+ System.out.println("Other thread Interrupted.");
+ }
+}
diff --git a/test/1934-jvmti-signal-thread/src/art/Threads.java b/test/1934-jvmti-signal-thread/src/art/Threads.java
new file mode 100644
index 0000000..266813b
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Threads.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Threads {
+ public static native void interruptThread(Thread t);
+ public static native void stopThread(Thread t, Throwable thr);
+}
diff --git a/test/1936-thread-end-events/check b/test/1936-thread-end-events/check
new file mode 100644
index 0000000..8a84388
--- /dev/null
+++ b/test/1936-thread-end-events/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI sends an extra event that ART doesn't. Add it to the expected output.
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ patch -p0 expected.txt < jvm-expected.patch >/dev/null
+fi
+
+./default-check "$@"
diff --git a/test/1936-thread-end-events/expected.txt b/test/1936-thread-end-events/expected.txt
new file mode 100644
index 0000000..6b71c5e
--- /dev/null
+++ b/test/1936-thread-end-events/expected.txt
@@ -0,0 +1,42 @@
+Entered public static void art.Test1936.foo()
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
+Entered void java.lang.ThreadGroup.threadTerminated(java.lang.Thread)
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
+Entered private void java.lang.ThreadGroup.remove(java.lang.Thread)
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
+Entered public static native void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int)
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
+Entered public static void art.Test1936.NotifyThreadEnd(java.lang.Thread)
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
+Entered public static void art.Test1936.foo()
+Thread: test-thread
+ | alive: true
+ | interrupted: false
+ | daemon: false
+ | group: java.lang.ThreadGroup[name=main,maxpri=10]
+
diff --git a/test/1936-thread-end-events/info.txt b/test/1936-thread-end-events/info.txt
new file mode 100644
index 0000000..51986c4
--- /dev/null
+++ b/test/1936-thread-end-events/info.txt
@@ -0,0 +1,6 @@
+Tests JVMTI ThreadEnd bug
+
+We had a bug where we were still sending events after JVMTI_EVENT_THREAD_END
+because of where the event was posted. This test ensures that the placement of
+the THREAD_END event is correct.
+
diff --git a/test/1936-thread-end-events/jvm-expected.patch b/test/1936-thread-end-events/jvm-expected.patch
new file mode 100644
index 0000000..ddb30a3
--- /dev/null
+++ b/test/1936-thread-end-events/jvm-expected.patch
@@ -0,0 +1,16 @@
+7a8,14
+> Entered private void java.lang.Thread.exit()
+> Thread: test-thread
+> | alive: true
+> | interrupted: false
+> | daemon: false
+> | group: java.lang.ThreadGroup[name=main,maxpri=10]
+>
+34c41
+< | group: java.lang.ThreadGroup[name=main,maxpri=10]
+---
+> | group: null
+41c48
+< | group: java.lang.ThreadGroup[name=main,maxpri=10]
+---
+> | group: null
diff --git a/test/1936-thread-end-events/method_trace.cc b/test/1936-thread-end-events/method_trace.cc
new file mode 100644
index 0000000..019b6a9
--- /dev/null
+++ b/test/1936-thread-end-events/method_trace.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test989StackTraceThrow {
+
+extern "C" JNIEXPORT
+jfloat JNICALL Java_art_Test989_returnFloatNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doGetFloat", "()F");
+ return env->CallStaticFloatMethod(klass, targetMethod);
+}
+extern "C" JNIEXPORT
+jdouble JNICALL Java_art_Test989_returnDoubleNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doGetDouble", "()D");
+ return env->CallStaticDoubleMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT jobject JNICALL Java_art_Test989_returnValueNative(JNIEnv* env, jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "mkTestObject", "()Ljava/lang/Object;");
+ return env->CallStaticObjectMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_doNothingNative(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ return;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_throwANative(JNIEnv* env,
+ jclass klass) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "doThrowA", "()V");
+ env->CallStaticVoidMethod(klass, targetMethod);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test989_acceptValueNative(JNIEnv* env,
+ jclass klass,
+ jobject arg) {
+ jmethodID targetMethod = env->GetStaticMethodID(klass, "printObject", "(Ljava/lang/Object;)V");
+ env->CallStaticVoidMethod(klass, targetMethod, arg);
+}
+
+} // namespace Test989StackTraceThrow
+} // namespace art
+
diff --git a/test/1936-thread-end-events/run b/test/1936-thread-end-events/run
new file mode 100755
index 0000000..51875a7
--- /dev/null
+++ b/test/1936-thread-end-events/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run the test with the jvmti agent enabled.
+./default-run "$@" --jvmti
diff --git a/test/1936-thread-end-events/src/Main.java b/test/1936-thread-end-events/src/Main.java
new file mode 100644
index 0000000..da66fc3
--- /dev/null
+++ b/test/1936-thread-end-events/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1936.run();
+ }
+}
diff --git a/test/1936-thread-end-events/src/art/Test1936.java b/test/1936-thread-end-events/src/art/Test1936.java
new file mode 100644
index 0000000..868deca
--- /dev/null
+++ b/test/1936-thread-end-events/src/art/Test1936.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test1936 {
+ public static void foo() {}
+
+ public static void NotifyThreadEnd(Thread me) {
+ // Don't actually do anything.
+ foo();
+ }
+
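+  // Method-entry hook installed through Trace.enableTracing2 (see setupTracing
+  // below); prints the entered method and the state of the current thread.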
+ public static void NotifyMethodEntry(Object o) {
+ System.out.println("Entered " + o.toString());
+ Thread me = Thread.currentThread();
+ System.out.println(String.format(
+ "Thread: %s\n" +
+ " | alive: %b\n" +
+ " | interrupted: %b\n" +
+ " | daemon: %b\n" +
+ " | group: %s\n",
+ me.getName(), me.isAlive(), me.isInterrupted(), me.isDaemon(), me.getThreadGroup()));
+ }
+
+ public static native void waitForever();
+ private static void setupTracing(Thread target) throws Exception {
+ Trace.disableTracing(target);
+ Trace.enableTracing2(
+ Test1936.class,
+ Test1936.class.getDeclaredMethod("NotifyMethodEntry", Object.class),
+ /*exit*/null,
+ /*field_access*/null,
+ /*field_modify*/null,
+ /*single_step*/null,
+ /*thread_start*/null,
+ Test1936.class.getDeclaredMethod("NotifyThreadEnd", Thread.class),
+ target);
+ }
+
+
+ public static void run() throws Exception {
+ Thread t = new Thread(() -> {
+ try {
+ setupTracing(Thread.currentThread());
+ foo();
+ } catch (Exception e) {
+ System.out.println("Caught exception " + e + "!");
+ e.printStackTrace();
+ }
+ }, "test-thread");
+ t.start();
+ t.join();
+ }
+}
diff --git a/test/1936-thread-end-events/src/art/Trace.java b/test/1936-thread-end-events/src/art/Trace.java
new file mode 100644
index 0000000..8999bb1
--- /dev/null
+++ b/test/1936-thread-end-events/src/art/Trace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+public class Trace {
+ public static native void enableTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Thread thr);
+ public static native void disableTracing(Thread thr);
+
+ public static void enableFieldTracing(Class<?> methodClass,
+ Method fieldAccess,
+ Method fieldModify,
+ Thread thr) {
+ enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
+ }
+
+ public static void enableMethodTracing(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Thread thr) {
+ enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
+ }
+
+ public static void enableSingleStepTracing(Class<?> methodClass,
+ Method singleStep,
+ Thread thr) {
+ enableTracing(methodClass, null, null, null, null, singleStep, thr);
+ }
+
+ public static native void watchFieldAccess(Field f);
+ public static native void watchFieldModification(Field f);
+ public static native void watchAllFieldAccesses();
+ public static native void watchAllFieldModifications();
+
+ // the names, arguments, and even line numbers of these functions are embedded in the tests so we
+ // need to add to the bottom and not modify old ones to maintain compat.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
+}
diff --git a/test/1937-transform-soft-fail/check b/test/1937-transform-soft-fail/check
new file mode 100755
index 0000000..7cee530
--- /dev/null
+++ b/test/1937-transform-soft-fail/check
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
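+# Strip everything after the first ':' on each line of the output so that only
+# the exception class name, not its message, is compared against the expected
+# output.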
+sed -e 's/:.*$//' "$2" > "$2.tmp"
+
+./default-check "$1" "$2.tmp"
diff --git a/test/1937-transform-soft-fail/expected.txt b/test/1937-transform-soft-fail/expected.txt
new file mode 100644
index 0000000..f0f6ac8
--- /dev/null
+++ b/test/1937-transform-soft-fail/expected.txt
@@ -0,0 +1,3 @@
+hello
+throwing
+Caught exception java.lang.NoSuchMethodError
diff --git a/test/1937-transform-soft-fail/info.txt b/test/1937-transform-soft-fail/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/1937-transform-soft-fail/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/1937-transform-soft-fail/run b/test/1937-transform-soft-fail/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1937-transform-soft-fail/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1937-transform-soft-fail/src/Main.java b/test/1937-transform-soft-fail/src/Main.java
new file mode 100644
index 0000000..e3541b3
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1937.run();
+ }
+}
diff --git a/test/1937-transform-soft-fail/src/art/Redefinition.java b/test/1937-transform-soft-fail/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+ // A set of possible test configurations. Test should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1937-transform-soft-fail/src/art/Test1937.java b/test/1937-transform-soft-fail/src/art/Test1937.java
new file mode 100644
index 0000000..7255a5e
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/art/Test1937.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1937 {
+
+ static class Transform {
+ public void sayHi() {
+ // Use lower 'h' to make sure the string will have a different string id
+      // Use a lowercase 'h' to make sure the string will have a different string id
+      // than the transformation (the transformation code is the same except for
+      // the actual printed String, which was making the test pass inaccurately
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+ // "Goodbye" < "LTransform;" < "hello".
+ System.out.println("hello");
+ }
+ }
+
+ /**
+ * base64 encoded class/dex file for
+ * class Transform {
+ * public void sayHi() {
+ * System.out.println("throwing");
+ * Redefinition.notPresent();
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAJQoABwAPCQAQABEIABIKABMAFAoAFQAWBwAYBwAbAQAGPGluaXQ+AQADKClWAQAE" +
+ "Q29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAKU291cmNlRmlsZQEADVRlc3QxOTM3Lmph" +
+ "dmEMAAgACQcAHAwAHQAeAQAIdGhyb3dpbmcHAB8MACAAIQcAIgwAIwAJBwAkAQAWYXJ0L1Rlc3Qx" +
+ "OTM3JFRyYW5zZm9ybQEACVRyYW5zZm9ybQEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" +
+ "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" +
+ "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBABBhcnQv" +
+ "UmVkZWZpbml0aW9uAQAKbm90UHJlc2VudAEADGFydC9UZXN0MTkzNwAgAAYABwAAAAAAAgAAAAgA" +
+ "CQABAAoAAAAdAAEAAQAAAAUqtwABsQAAAAEACwAAAAYAAQAAACMAAQAMAAkAAQAKAAAALAACAAEA" +
+ "AAAMsgACEgO2AAS4AAWxAAAAAQALAAAADgADAAAAJQAIACYACwAnAAIADQAAAAIADgAaAAAACgAB" +
+ "AAYAFwAZAAg=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQDfmxvwUHv7EEBCvzjdM/uAviWG8eIsKIbsAwAAcAAAAHhWNBIAAAAAAAAAACgDAAAW" +
+ "AAAAcAAAAAoAAADIAAAAAgAAAPAAAAABAAAACAEAAAUAAAAQAQAAAQAAADgBAACUAgAAWAEAALoB" +
+ "AADCAQAA1gEAAPABAAAAAgAAJAIAAEQCAABbAgAAbwIAAIMCAACXAgAApgIAALECAAC0AgAAuAIA" +
+ "AMUCAADLAgAA1wIAANwCAADlAgAA7AIAAPYCAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAA" +
+ "CAAAAAkAAAAMAAAADAAAAAkAAAAAAAAADQAAAAkAAAC0AQAACAAFABEAAAAAAAAAEAAAAAEAAAAA" +
+ "AAAAAQAAABMAAAAFAAEAEgAAAAYAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAoAAACkAQAAGAMAAAAA" +
+ "AAACAAAACQMAAA8DAAABAAEAAQAAAP0CAAAEAAAAcBAEAAAADgADAAEAAgAAAAIDAAALAAAAYgAA" +
+ "ABoBFABuIAMAEABxAAAAAAAOAAAAWAEAAAAAAAAAAAAAAAAAAAEAAAAHAAY8aW5pdD4AEkxhcnQv" +
+ "UmVkZWZpbml0aW9uOwAYTGFydC9UZXN0MTkzNyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTM3OwAi" +
+ "TGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lu" +
+ "bmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph" +
+ "dmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTM3LmphdmEACVRyYW5z" +
+ "Zm9ybQABVgACVkwAC2FjY2Vzc0ZsYWdzAARuYW1lAApub3RQcmVzZW50AANvdXQAB3ByaW50bG4A" +
+ "BXNheUhpAAh0aHJvd2luZwAFdmFsdWUAIwAHDgAlAAcOeDwAAgMBFRgCAgQCDgQIDxcLAAABAQGA" +
+ "gATkAgIB/AIAABAAAAAAAAAAAQAAAAAAAAABAAAAFgAAAHAAAAACAAAACgAAAMgAAAADAAAAAgAA" +
+ "APAAAAAEAAAAAQAAAAgBAAAFAAAABQAAABABAAAGAAAAAQAAADgBAAADEAAAAQAAAFgBAAABIAAA" +
+ "AgAAAGQBAAAGIAAAAQAAAKQBAAABEAAAAQAAALQBAAACIAAAFgAAALoBAAADIAAAAgAAAP0CAAAE" +
+ "IAAAAgAAAAkDAAAAIAAAAQAAABgDAAAAEAAAAQAAACgDAAA=");
+
+ public static void run() {
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+ try {
+ t.sayHi();
+ } catch (Throwable e) {
+ System.out.println("Caught exception " + e.getClass().getName() + ": " + e.getMessage());
+ }
+ }
+}
diff --git a/test/1938-transform-abstract-single-impl/expected.txt b/test/1938-transform-abstract-single-impl/expected.txt
new file mode 100644
index 0000000..6a06f9b
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/expected.txt
@@ -0,0 +1,4 @@
+JNI_OnLoad called
+Running sayHi() - hello
+redefining TransformAbstract
+Running sayHi() - Goodbye
diff --git a/test/1938-transform-abstract-single-impl/info.txt b/test/1938-transform-abstract-single-impl/info.txt
new file mode 100644
index 0000000..5df8306
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/info.txt
@@ -0,0 +1,2 @@
+Tests that single-implementation abstract methods don't crash the runtime when
+their declaring class is redefined.
diff --git a/test/1938-transform-abstract-single-impl/run b/test/1938-transform-abstract-single-impl/run
new file mode 100755
index 0000000..adb1a1c
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
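+# Run with the jvmti agent enabled; --no-app-image is presumably needed because app images
+# can conflict with class redefinition.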
+./default-run "$@" --jvmti --no-app-image
diff --git a/test/1938-transform-abstract-single-impl/src/Main.java b/test/1938-transform-abstract-single-impl/src/Main.java
new file mode 100644
index 0000000..7ac2172
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/src/Main.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.util.Base64;
+public class Main {
+ static abstract class TransformAbstract {
+ public abstract void doSayHi();
+
+ public void sayHi() {
+ System.out.println("hello");
+ }
+ }
+
+ static final class TransformConcrete extends TransformAbstract {
+ public final void doSayHi() {
+ System.out.print("Running sayHi() - ");
+ sayHi();
+ }
+ }
+
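+  // Bound by the test's native library; forces JIT compilation so the single-implementation
+  // (CHA) information for these methods is actually in use before the redefinition below.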
+  public static native void ensureJitCompiled(Class<?> k, String m);
+
+ /**
+ * base64 encoded class/dex file for
+ * static abstract class TransformAbstract {
+ * public abstract void doSayHi();
+ * public void sayHi() {
+ * System.out.println("Goodbye");
+ * }
+ * }
+ */
+ private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADQAIQoABgAPCQAQABEIABIKABMAFAcAFgcAGQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+ "AA9MaW5lTnVtYmVyVGFibGUBAAdkb1NheUhpAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAJTWFpbi5q" +
+ "YXZhDAAHAAgHABoMABsAHAEAB0dvb2RieWUHAB0MAB4AHwcAIAEAFk1haW4kVHJhbnNmb3JtQWJz" +
+ "dHJhY3QBABFUcmFuc2Zvcm1BYnN0cmFjdAEADElubmVyQ2xhc3NlcwEAEGphdmEvbGFuZy9PYmpl" +
+ "Y3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2" +
+ "YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAARNYWlu" +
+ "BCAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAHAQB" +
+ "AAsACAAAAAEADAAIAAEACQAAACUAAgABAAAACbIAAhIDtgAEsQAAAAEACgAAAAoAAgAAAB8ACAAg" +
+ "AAIADQAAAAIADgAYAAAACgABAAUAFQAXBAg=");
+ private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQCQkoTiKzIz0l96rtsnUxdY4Kwx+YINWFHEAwAAcAAAAHhWNBIAAAAAAAAAAAADAAAV" +
+ "AAAAcAAAAAkAAADEAAAAAgAAAOgAAAABAAAAAAEAAAUAAAAIAQAAAQAAADABAAB0AgAAUAEAAKoB" +
+ "AACyAQAAuwEAANUBAADdAQAAAQIAACECAAA4AgAATAIAAGACAAB0AgAAfwIAAJICAACVAgAAmQIA" +
+ "AKYCAACvAgAAtQIAALoCAADDAgAAygIAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+ "DAAAAAwAAAAIAAAAAAAAAA0AAAAIAAAApAEAAAcABAARAAAAAAAAAAAAAAAAAAAADwAAAAAAAAAT" +
+ "AAAABAABABIAAAAFAAAAAAAAAAAAAAAABAAABQAAAAAAAAAKAAAAlAEAAOwCAAAAAAAAAgAAANwC" +
+ "AADiAgAAAQABAAEAAADRAgAABAAAAHAQBAAAAA4AAwABAAIAAADWAgAACAAAAGIAAAAaAQEAbiAD" +
+ "ABAADgBQAQAAAAAAAAAAAAAAAAAAAQAAAAYABjxpbml0PgAHR29vZGJ5ZQAYTE1haW4kVHJhbnNm" +
+ "b3JtQWJzdHJhY3Q7AAZMTWFpbjsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsA" +
+ "HkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJM" +
+ "amF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07" +
+ "AAlNYWluLmphdmEAEVRyYW5zZm9ybUFic3RyYWN0AAFWAAJWTAALYWNjZXNzRmxhZ3MAB2RvU2F5" +
+ "SGkABG5hbWUAA291dAAHcHJpbnRsbgAFc2F5SGkABXZhbHVlABwABw4AHwAHDngAAgIBFBgBAgMC" +
+ "DiQIBBAXCwAAAQIAgIAE3AIBgQgAAQH0AgAAEAAAAAAAAAABAAAAAAAAAAEAAAAVAAAAcAAAAAIA" +
+ "AAAJAAAAxAAAAAMAAAACAAAA6AAAAAQAAAABAAAAAAEAAAUAAAAFAAAACAEAAAYAAAABAAAAMAEA" +
+ "AAMQAAABAAAAUAEAAAEgAAACAAAAXAEAAAYgAAABAAAAlAEAAAEQAAABAAAApAEAAAIgAAAVAAAA" +
+ "qgEAAAMgAAACAAAA0QIAAAQgAAACAAAA3AIAAAAgAAABAAAA7AIAAAAQAAABAAAAAAMAAA==");
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+
+ ensureJitCompiled(TransformAbstract.class, "sayHi");
+ ensureJitCompiled(TransformConcrete.class, "doSayHi");
+
+ TransformAbstract t1 = new TransformConcrete();
+ t1.doSayHi();
+
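+    // Sanity-check the runtime's view before redefining: doSayHi should currently be
+    // treated as single-implementation.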
+ assertSingleImplementation(TransformAbstract.class, "doSayHi", true);
+
+ System.out.println("redefining TransformAbstract");
+ Redefinition.doCommonClassRedefinition(TransformAbstract.class, CLASS_BYTES, DEX_BYTES);
+
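+    // Must not crash; per expected.txt this now prints "Running sayHi() - Goodbye".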
+ t1.doSayHi();
+ }
+
+ private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
+ private static void assertSingleImplementation(Class<?> clazz, String method_name, boolean b) {
+ if (hasSingleImplementation(clazz, method_name) != b) {
+      System.out.println(clazz + "." + method_name +
+          " does not have the expected single-implementation value of " + b);
+ }
+ }
+}
diff --git a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS.
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+  // Redefines the target class using the given class-file and dex-file bytes.
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
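+  // Example use (hypothetical classes and byte arrays):
+  //   doMultiClassRedefinition(new CommonClassDefinition(A.class, A_CLASS, A_DEX),
+  //                            new CommonClassDefinition(B.class, B_CLASS, B_DEX));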
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1939-proxy-frames/expected.txt b/test/1939-proxy-frames/expected.txt
new file mode 100644
index 0000000..a4c97c9
--- /dev/null
+++ b/test/1939-proxy-frames/expected.txt
@@ -0,0 +1,8 @@
+Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetThis" on remote thread.
+"GetThis" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: Proxy for [interface art.Test1939$Foo]
+Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetLocalReference0" on remote thread.
+"GetLocalReference0" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) failed due to JVMTI_ERROR_OPAQUE_FRAME
+Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetProxyFrameLocation" on remote thread.
+"GetProxyFrameLocation" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: -1
+Running public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) with "GetProxyFrameMethod" on remote thread.
+"GetProxyFrameMethod" on public abstract void art.Test1939$Foo.InterfaceProxyMethod(java.lang.Runnable) got value: public final void $Proxy0.InterfaceProxyMethod(java.lang.Runnable)
diff --git a/test/1939-proxy-frames/info.txt b/test/1939-proxy-frames/info.txt
new file mode 100644
index 0000000..9fc3d62
--- /dev/null
+++ b/test/1939-proxy-frames/info.txt
@@ -0,0 +1,2 @@
+Test for jvmti get local instance and other frame operations on proxy frames.
+
diff --git a/test/1939-proxy-frames/local_instance.cc b/test/1939-proxy-frames/local_instance.cc
new file mode 100644
index 0000000..dc833bf
--- /dev/null
+++ b/test/1939-proxy-frames/local_instance.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "jni.h"
+#include "scoped_local_ref.h"
+#include "scoped_primitive_array.h"
+
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1939ProxyFrames {
+
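+// Returns the frame's method as a java.lang.reflect object, resolving the jmethodID from
+// GetFrameLocation via GetMethodDeclaringClass and ToReflectedMethod.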
+extern "C" JNIEXPORT jobject Java_art_Test1939_GetFrameMethod(JNIEnv* env,
+ jclass,
+ jthread thr,
+ jint depth) {
+ jmethodID m = nullptr;
+ jlong loc = -1;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetFrameLocation(thr, depth, &m, &loc))) {
+ return nullptr;
+ }
+ jclass klass = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetMethodDeclaringClass(m, &klass))) {
+ return nullptr;
+ }
+ jobject res = env->ToReflectedMethod(klass, m, false);
+ env->DeleteLocalRef(klass);
+ return res;
+}
+
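+// Returns only the bytecode location of the frame; proxy frames are expected to report -1
+// (see expected.txt).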
+extern "C" JNIEXPORT jlong Java_art_Test1939_GetFrameLocation(JNIEnv* env,
+ jclass,
+ jthread thr,
+ jint depth) {
+ jmethodID m = nullptr;
+ jlong loc = -1;
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->GetFrameLocation(thr, depth, &m, &loc));
+ return loc;
+}
+
+} // namespace Test1939ProxyFrames
+} // namespace art
+
diff --git a/test/1939-proxy-frames/run b/test/1939-proxy-frames/run
new file mode 100755
index 0000000..51875a7
--- /dev/null
+++ b/test/1939-proxy-frames/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run with the jvmti agent enabled.
+./default-run "$@" --jvmti
diff --git a/test/1939-proxy-frames/src/Main.java b/test/1939-proxy-frames/src/Main.java
new file mode 100644
index 0000000..85cab34
--- /dev/null
+++ b/test/1939-proxy-frames/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1939.run();
+ }
+}
diff --git a/test/1939-proxy-frames/src/art/Breakpoint.java b/test/1939-proxy-frames/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
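+        // Assumes the line table is sorted by location: keep the line of the last entry
+        // at or before this breakpoint's location.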
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
diff --git a/test/1939-proxy-frames/src/art/Locals.java b/test/1939-proxy-frames/src/art/Locals.java
new file mode 100644
index 0000000..22e21be
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Locals.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.Objects;
+
+public class Locals {
+ public static native void EnableLocalVariableAccess();
+
+ public static class VariableDescription {
+ public final long start_location;
+ public final int length;
+ public final String name;
+ public final String signature;
+ public final String generic_signature;
+ public final int slot;
+
+ public VariableDescription(
+ long start, int length, String name, String sig, String gen_sig, int slot) {
+ this.start_location = start;
+ this.length = length;
+ this.name = name;
+ this.signature = sig;
+ this.generic_signature = gen_sig;
+ this.slot = slot;
+ }
+
+ @Override
+ public String toString() {
+ return String.format(
+ "VariableDescription { " +
+ "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
+ "}",
+ this.signature,
+ this.name,
+ this.generic_signature,
+ this.slot,
+ this.start_location,
+ this.length);
+ }
+ public boolean equals(Object other) {
+ if (!(other instanceof VariableDescription)) {
+ return false;
+ } else {
+ VariableDescription v = (VariableDescription)other;
+ return Objects.equals(v.signature, signature) &&
+ Objects.equals(v.name, name) &&
+ Objects.equals(v.generic_signature, generic_signature) &&
+ v.slot == slot &&
+ v.start_location == start_location &&
+ v.length == length;
+ }
+ }
+ public int hashCode() {
+ return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
+ this.start_location, this.length);
+ }
+ }
+
+ public static native VariableDescription[] GetLocalVariableTable(Executable e);
+
+ public static VariableDescription GetVariableAtLine(
+ Executable e, String name, String sig, int line) throws Exception {
+ return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
+ }
+
+ public static VariableDescription GetVariableAtLocation(
+ Executable e, String name, String sig, long loc) {
+ VariableDescription[] vars = GetLocalVariableTable(e);
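+    // A slot matches when loc falls in the live range [start_location, start_location + length).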
+ for (VariableDescription var : vars) {
+ if (var.start_location <= loc &&
+ var.length + var.start_location > loc &&
+ var.name.equals(name) &&
+ var.signature.equals(sig)) {
+ return var;
+ }
+ }
+ throw new Error(
+ "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
+ }
+
+ public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
+ public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
+ public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
+ public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
+ public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
+ public static native Object GetLocalInstance(Thread thr, int depth);
+
+ public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
+ SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
+ }
+ public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
+ SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
+ }
+ public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
+ SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
+ }
+ public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
+ SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
+ }
+ public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
+ public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
+ public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
+ public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
+ public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
+}
diff --git a/test/1939-proxy-frames/src/art/StackTrace.java b/test/1939-proxy-frames/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+ public static class StackFrameData {
+ public final Thread thr;
+ public final Executable method;
+ public final long current_location;
+ public final int depth;
+
+ public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+ this.thr = thr;
+ this.method = e;
+ this.current_location = loc;
+ this.depth = depth;
+ }
+ @Override
+ public String toString() {
+ return String.format(
+ "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+ this.thr,
+ this.method,
+ this.current_location,
+ this.depth);
+ }
+ }
+
+ public static native int GetStackDepth(Thread thr);
+
+ private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+ public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is
+    // not suspended. The spec says an unsuspended thread is fine, but for consistent results
+    // we suspend the thread when running on the RI.
+ boolean suspend_thread =
+ !System.getProperty("java.vm.name").equals("Dalvik") &&
+ !thr.equals(Thread.currentThread()) &&
+ !Suspension.isSuspended(thr);
+ if (suspend_thread) {
+ Suspension.suspend(thr);
+ }
+ StackFrameData[] out = nativeGetStackTrace(thr);
+ if (suspend_thread) {
+ Suspension.resume(thr);
+ }
+ return out;
+ }
+}
+
diff --git a/test/1939-proxy-frames/src/art/Suspension.java b/test/1939-proxy-frames/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1939-proxy-frames/src/art/Test1939.java b/test/1939-proxy-frames/src/art/Test1939.java
new file mode 100644
index 0000000..83d0d2c
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Test1939.java
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.nio.ByteBuffer;
+import java.util.concurrent.Semaphore;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.Consumer;
+
+public class Test1939 {
+ public static interface SafepointFunction {
+ public void invoke(
+ Thread thread,
+ Method target,
+ int depth) throws Exception;
+ }
+
+ public static interface GetterFunction {
+ public Object GetVar(Thread t, int depth);
+ }
+
+ public static SafepointFunction NamedGet(final String type, final GetterFunction get) {
+ return new SafepointFunction() {
+ public void invoke(Thread t, Method method, int depth) {
+ try {
+ Object res = get.GetVar(t, depth);
+ System.out.println(this + " on " + method + " got value: " + res);
+ } catch (Exception e) {
+ System.out.println(this + " on " + method + " failed due to " + e.getMessage());
+ }
+ }
+ public String toString() {
+ return "\"Get" + type + "\"";
+ }
+ };
+ }
+
+ public static class TestCase {
+ public final Object thiz;
+ public final Method target;
+
+ public TestCase(Method target) {
+ this(null, target);
+ }
+ public TestCase(Object thiz, Method target) {
+ this.thiz = thiz;
+ this.target = target;
+ }
+
+ public static class ThreadPauser implements Runnable {
+ public final Semaphore sem_wakeup_main;
+ public final Semaphore sem_wait;
+
+ public ThreadPauser() {
+ sem_wakeup_main = new Semaphore(0);
+ sem_wait = new Semaphore(0);
+ }
+
+ public void run() {
+ try {
+ sem_wakeup_main.release();
+ sem_wait.acquire();
+ } catch (Exception e) {
+ throw new Error("Error with semaphores!", e);
+ }
+ }
+
+ public void waitForOtherThreadToPause() throws Exception {
+ sem_wakeup_main.acquire();
+ }
+
+ public void wakeupOtherThread() throws Exception {
+ sem_wait.release();
+ }
+ }
+
+ public void exec(final SafepointFunction safepoint) throws Exception {
+ System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
+ final ThreadPauser pause = new ThreadPauser();
+ Thread remote = new Thread(
+ () -> {
+ try {
+ target.invoke(thiz, pause);
+ } catch (Exception e) {
+ throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+ }
+ },
+ "remote thread for " + target + " with " + safepoint);
+ remote.start();
+ pause.waitForOtherThreadToPause();
+ try {
+ Suspension.suspend(remote);
+ StackTrace.StackFrameData frame = findStackFrame(remote);
+ safepoint.invoke(remote, target, frame.depth);
+ } finally {
+ Suspension.resume(remote);
+ pause.wakeupOtherThread();
+ remote.join();
+ }
+ }
+
+ private StackTrace.StackFrameData findStackFrame(Thread thr) {
+ for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
+ if (frame.method.equals(target) ||
+ (frame.method.getName().equals(target.getName()) &&
+ Arrays.deepEquals(frame.method.getParameterTypes(), target.getParameterTypes()) &&
+ ((Method)frame.method).getReturnType().equals(target.getReturnType()))) {
+ return frame;
+ }
+ }
+ throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
+ }
+ }
+
+ public static Method getMethod(Class<?> klass, String name) throws Exception {
+ return klass.getDeclaredMethod(name, Runnable.class);
+ }
+
+ public static interface Foo {
+ public void InterfaceProxyMethod(Runnable r);
+ }
+
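+  // Builds a proxy whose handler answers toString() with a description and otherwise runs its
+  // Runnable argument, so a ThreadPauser passed to a proxy method pauses inside the proxy
+  // frame that the test wants to inspect.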
+ public static Object getProxyObject(final Class... k) {
+ return Proxy.newProxyInstance(
+ Test1939.class.getClassLoader(),
+ k,
+ (p, m, a) -> {
+ if (m.getName().equals("toString")) {
+ return "Proxy for " + Arrays.toString(k);
+ } else {
+ ((Runnable)a[0]).run();
+ return null;
+ }
+ });
+ }
+
+ public static void run() throws Exception {
+ Locals.EnableLocalVariableAccess();
+ TestCase test = new TestCase(
+ getProxyObject(Foo.class), getMethod(Foo.class, "InterfaceProxyMethod"));
+ test.exec(NamedGet("This", Locals::GetLocalInstance));
+ test.exec(NamedGet("LocalReference0", (t, d) -> Locals.GetLocalVariableObject(t, d, 0)));
+ test.exec(NamedGet("ProxyFrameLocation", (t, d) -> Long.valueOf(GetFrameLocation(t, d))));
+ test.exec(NamedGet("ProxyFrameMethod", Test1939::GetFrameMethod));
+ }
+
+ public static native long GetFrameLocation(Thread thr, int depth);
+ public static native Executable GetFrameMethod(Thread thr, int depth);
+}
+
diff --git a/test/442-checker-constant-folding/build b/test/442-checker-constant-folding/build
index 49292c9..947ec9a 100755
--- a/test/442-checker-constant-folding/build
+++ b/test/442-checker-constant-folding/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export DX=$ANDROID_HOST_OUT/bin/dx
+
./default-build "$@"
diff --git a/test/450-checker-types/build b/test/450-checker-types/build
index 49292c9..3721955 100755
--- a/test/450-checker-types/build
+++ b/test/450-checker-types/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/458-checker-instruct-simplification/build b/test/458-checker-instruct-simplification/build
index 49292c9..3721955 100755
--- a/test/458-checker-instruct-simplification/build
+++ b/test/458-checker-instruct-simplification/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/458-checker-instruct-simplification/smali/SmaliTests.smali b/test/458-checker-instruct-simplification/smali/SmaliTests.smali
index a8d7d94..d987398 100644
--- a/test/458-checker-instruct-simplification/smali/SmaliTests.smali
+++ b/test/458-checker-instruct-simplification/smali/SmaliTests.smali
@@ -331,70 +331,54 @@
# Test simplification of the `~~var` pattern.
# The transformation tested is implemented in `InstructionSimplifierVisitor::VisitNot`.
-## CHECK-START: long SmaliTests.NotNot1(long) instruction_simplifier (before)
+## CHECK-START: long SmaliTests.$noinline$NotNot1(long) instruction_simplifier (before)
## CHECK-DAG: <<Arg:j\d+>> ParameterValue
## CHECK-DAG: <<Not1:j\d+>> Not [<<Arg>>]
## CHECK-DAG: <<Not2:j\d+>> Not [<<Not1>>]
## CHECK-DAG: Return [<<Not2>>]
-## CHECK-START: long SmaliTests.NotNot1(long) instruction_simplifier (after)
+## CHECK-START: long SmaliTests.$noinline$NotNot1(long) instruction_simplifier (after)
## CHECK-DAG: <<Arg:j\d+>> ParameterValue
## CHECK-DAG: Return [<<Arg>>]
-## CHECK-START: long SmaliTests.NotNot1(long) instruction_simplifier (after)
+## CHECK-START: long SmaliTests.$noinline$NotNot1(long) instruction_simplifier (after)
## CHECK-NOT: Not
-.method public static NotNot1(J)J
+.method public static $noinline$NotNot1(J)J
.registers 4
.param p0, "arg" # J
.prologue
- sget-boolean v0, LMain;->doThrow:Z
- # if (doThrow) throw new Error();
- if-eqz v0, :cond_a
- new-instance v0, Ljava/lang/Error;
- invoke-direct {v0}, Ljava/lang/Error;-><init>()V
- throw v0
-
- :cond_a
# return ~~arg
not-long v0, p0
not-long v0, v0
return-wide v0
.end method
-## CHECK-START: int SmaliTests.NotNot2(int) instruction_simplifier (before)
+## CHECK-START: int SmaliTests.$noinline$NotNot2(int) instruction_simplifier (before)
## CHECK-DAG: <<Arg:i\d+>> ParameterValue
## CHECK-DAG: <<Not1:i\d+>> Not [<<Arg>>]
## CHECK-DAG: <<Not2:i\d+>> Not [<<Not1>>]
## CHECK-DAG: <<Add:i\d+>> Add [<<Not2>>,<<Not1>>]
## CHECK-DAG: Return [<<Add>>]
-## CHECK-START: int SmaliTests.NotNot2(int) instruction_simplifier (after)
+## CHECK-START: int SmaliTests.$noinline$NotNot2(int) instruction_simplifier (after)
## CHECK-DAG: <<Arg:i\d+>> ParameterValue
## CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
## CHECK-DAG: <<Add:i\d+>> Add [<<Arg>>,<<Not>>]
## CHECK-DAG: Return [<<Add>>]
-## CHECK-START: int SmaliTests.NotNot2(int) instruction_simplifier (after)
+## CHECK-START: int SmaliTests.$noinline$NotNot2(int) instruction_simplifier (after)
## CHECK: Not
## CHECK-NOT: Not
-.method public static NotNot2(I)I
+.method public static $noinline$NotNot2(I)I
.registers 3
.param p0, "arg" # I
.prologue
- sget-boolean v1, LMain;->doThrow:Z
- # if (doThrow) throw new Error();
- if-eqz v1, :cond_a
- new-instance v1, Ljava/lang/Error;
- invoke-direct {v1}, Ljava/lang/Error;-><init>()V
- throw v1
-
- :cond_a
# temp = ~arg; return temp + ~temp;
not-int v0, p0
not-int v1, v0
@@ -407,31 +391,31 @@
# both negations can be removed but we only expect the simplifier to
# remove the second.
-## CHECK-START: boolean SmaliTests.NotNotBool(boolean) instruction_simplifier (before)
+## CHECK-START: boolean SmaliTests.$noinline$NotNotBool(boolean) instruction_simplifier (before)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
## CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
## CHECK-DAG: <<NotResult:i\d+>> Xor [<<Result>>,<<Const1>>]
## CHECK-DAG: Return [<<NotResult>>]
-## CHECK-START: boolean SmaliTests.NotNotBool(boolean) instruction_simplifier (after)
+## CHECK-START: boolean SmaliTests.$noinline$NotNotBool(boolean) instruction_simplifier (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
## CHECK-DAG: <<NotResult:z\d+>> BooleanNot [<<Result>>]
## CHECK-DAG: Return [<<NotResult>>]
-## CHECK-START: boolean SmaliTests.NotNotBool(boolean) instruction_simplifier$after_inlining (before)
+## CHECK-START: boolean SmaliTests.$noinline$NotNotBool(boolean) instruction_simplifier$after_inlining (before)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
## CHECK-DAG: <<NotNotArg:z\d+>> BooleanNot [<<NotArg>>]
## CHECK-DAG: Return [<<NotNotArg>>]
-## CHECK-START: boolean SmaliTests.NotNotBool(boolean) instruction_simplifier$after_inlining (after)
+## CHECK-START: boolean SmaliTests.$noinline$NotNotBool(boolean) instruction_simplifier$after_inlining (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
## CHECK-DAG: Return [<<Arg>>]
-## CHECK-START: boolean SmaliTests.NotNotBool(boolean) dead_code_elimination$final (after)
+## CHECK-START: boolean SmaliTests.$noinline$NotNotBool(boolean) dead_code_elimination$final (after)
## CHECK-DAG: <<Arg:z\d+>> ParameterValue
## CHECK-DAG: Return [<<Arg>>]
@@ -447,20 +431,12 @@
.end method
-.method public static NotNotBool(Z)Z
+.method public static $noinline$NotNotBool(Z)Z
.registers 2
.param p0, "arg" # Z
.prologue
- sget-boolean v0, LMain;->doThrow:Z
- # if (doThrow) throw new Error();
- if-eqz v0, :cond_a
- new-instance v0, Ljava/lang/Error;
- invoke-direct {v0}, Ljava/lang/Error;-><init>()V
- throw v0
-
- :cond_a
# return !Negate(arg)
invoke-static {p0}, LSmaliTests;->NegateValue(Z)Z
move-result v0
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 5c36ce9..20858f5 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -18,8 +18,6 @@
public class Main {
- static boolean doThrow = false;
-
public static void assertBooleanEquals(boolean expected, boolean result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -74,7 +72,6 @@
/// CHECK-NOT: Add
public static long $noinline$Add0(long arg) {
- if (doThrow) { throw new Error(); }
return 0 + arg;
}
@@ -97,7 +94,6 @@
/// CHECK-DAG: Return [<<Add>>]
public static int $noinline$AddAddSubAddConst(int arg) {
- if (doThrow) { throw new Error(); }
return arg + 1 + 2 - 3 + 4;
}
@@ -115,7 +111,6 @@
/// CHECK-NOT: And
public static int $noinline$AndAllOnes(int arg) {
- if (doThrow) { throw new Error(); }
return arg & -1;
}
@@ -137,7 +132,6 @@
/// CHECK-NOT: And
public static int $noinline$UShr28And15(int arg) {
- if (doThrow) { throw new Error(); }
return (arg >>> 28) & 15;
}
@@ -159,7 +153,6 @@
/// CHECK-NOT: And
public static long $noinline$UShr60And15(long arg) {
- if (doThrow) { throw new Error(); }
return (arg >>> 60) & 15;
}
@@ -180,7 +173,6 @@
/// CHECK-DAG: Return [<<And>>]
public static int $noinline$UShr28And7(int arg) {
- if (doThrow) { throw new Error(); }
return (arg >>> 28) & 7;
}
@@ -201,7 +193,6 @@
/// CHECK-DAG: Return [<<And>>]
public static long $noinline$UShr60And7(long arg) {
- if (doThrow) { throw new Error(); }
return (arg >>> 60) & 7;
}
@@ -224,7 +215,6 @@
/// CHECK-NOT: And
public static int $noinline$Shr24And255(int arg) {
- if (doThrow) { throw new Error(); }
return (arg >> 24) & 255;
}
@@ -247,7 +237,6 @@
/// CHECK-NOT: And
public static long $noinline$Shr56And255(long arg) {
- if (doThrow) { throw new Error(); }
return (arg >> 56) & 255;
}
@@ -268,7 +257,6 @@
/// CHECK-DAG: Return [<<And>>]
public static int $noinline$Shr24And127(int arg) {
- if (doThrow) { throw new Error(); }
return (arg >> 24) & 127;
}
@@ -289,7 +277,6 @@
/// CHECK-DAG: Return [<<And>>]
public static long $noinline$Shr56And127(long arg) {
- if (doThrow) { throw new Error(); }
return (arg >> 56) & 127;
}
@@ -307,7 +294,6 @@
/// CHECK-NOT: Div
public static long $noinline$Div1(long arg) {
- if (doThrow) { throw new Error(); }
return arg / 1;
}
@@ -326,7 +312,6 @@
/// CHECK-NOT: Div
public static int $noinline$DivN1(int arg) {
- if (doThrow) { throw new Error(); }
return arg / -1;
}
@@ -344,7 +329,6 @@
/// CHECK-NOT: Mul
public static long $noinline$Mul1(long arg) {
- if (doThrow) { throw new Error(); }
return arg * 1;
}
@@ -363,7 +347,6 @@
/// CHECK-NOT: Mul
public static int $noinline$MulN1(int arg) {
- if (doThrow) { throw new Error(); }
return arg * -1;
}
@@ -383,7 +366,6 @@
/// CHECK-NOT: Mul
public static long $noinline$MulPowerOfTwo128(long arg) {
- if (doThrow) { throw new Error(); }
return arg * 128;
}
@@ -404,7 +386,6 @@
/// CHECK-DAG: Return [<<Mul>>]
public static long $noinline$MulMulMulConst(long arg) {
- if (doThrow) { throw new Error(); }
return 10 * arg * 11 * 12;
}
@@ -422,7 +403,6 @@
/// CHECK-NOT: Or
public static int $noinline$Or0(int arg) {
- if (doThrow) { throw new Error(); }
return arg | 0;
}
@@ -439,7 +419,6 @@
/// CHECK-NOT: Or
public static long $noinline$OrSame(long arg) {
- if (doThrow) { throw new Error(); }
return arg | arg;
}
@@ -457,7 +436,6 @@
/// CHECK-NOT: Shl
public static int $noinline$Shl0(int arg) {
- if (doThrow) { throw new Error(); }
return arg << 0;
}
@@ -475,7 +453,6 @@
/// CHECK-NOT: Shr
public static long $noinline$Shr0(long arg) {
- if (doThrow) { throw new Error(); }
return arg >> 0;
}
@@ -493,7 +470,6 @@
/// CHECK-NOT: Shr
public static long $noinline$Shr64(long arg) {
- if (doThrow) { throw new Error(); }
return arg >> 64;
}
@@ -511,7 +487,6 @@
/// CHECK-NOT: Sub
public static long $noinline$Sub0(long arg) {
- if (doThrow) { throw new Error(); }
return arg - 0;
}
@@ -530,7 +505,6 @@
/// CHECK-NOT: Sub
public static int $noinline$SubAliasNeg(int arg) {
- if (doThrow) { throw new Error(); }
return 0 - arg;
}
@@ -549,7 +523,6 @@
/// CHECK-DAG: Return [<<Sub>>]
public static int $noinline$SubAddConst1(int arg) {
- if (doThrow) { throw new Error(); }
return 5 - arg + 6;
}
@@ -568,7 +541,6 @@
/// CHECK-DAG: Return [<<Sub>>]
public static int $noinline$SubAddConst2(int arg) {
- if (doThrow) { throw new Error(); }
return 14 - (arg + 13);
}
@@ -587,7 +559,6 @@
/// CHECK-DAG: Return [<<Add>>]
public static long $noinline$SubSubConst(long arg) {
- if (doThrow) { throw new Error(); }
return 17 - (18 - arg);
}
@@ -605,7 +576,6 @@
/// CHECK-NOT: UShr
public static long $noinline$UShr0(long arg) {
- if (doThrow) { throw new Error(); }
return arg >>> 0;
}
@@ -623,7 +593,6 @@
/// CHECK-NOT: Xor
public static int $noinline$Xor0(int arg) {
- if (doThrow) { throw new Error(); }
return arg ^ 0;
}
@@ -642,7 +611,6 @@
/// CHECK-NOT: Xor
public static int $noinline$XorAllOnes(int arg) {
- if (doThrow) { throw new Error(); }
return arg ^ -1;
}
@@ -670,7 +638,6 @@
/// CHECK-DAG: Return [<<Neg>>]
public static int $noinline$AddNegs1(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
return -arg1 + -arg2;
}
@@ -716,7 +683,6 @@
/// CHECK-DAG: Return [<<Or>>]
public static int $noinline$AddNegs2(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
int temp1 = -arg1;
int temp2 = -arg2;
return (temp1 + temp2) | (temp1 + temp2);
@@ -756,7 +722,6 @@
/// CHECK: Goto
public static long $noinline$AddNegs3(long arg1, long arg2) {
- if (doThrow) { throw new Error(); }
long res = 0;
long n_arg1 = -arg1;
long n_arg2 = -arg2;
@@ -790,7 +755,6 @@
/// CHECK-NOT: Add
public static long $noinline$AddNeg1(long arg1, long arg2) {
- if (doThrow) { throw new Error(); }
return -arg1 + arg2;
}
@@ -825,7 +789,6 @@
/// CHECK-NOT: Sub
public static long $noinline$AddNeg2(long arg1, long arg2) {
- if (doThrow) { throw new Error(); }
long temp = -arg2;
return (arg1 + temp) | (arg1 + temp);
}
@@ -849,7 +812,6 @@
/// CHECK-NOT: Neg
public static long $noinline$NegNeg1(long arg) {
- if (doThrow) { throw new Error(); }
return -(-arg);
}
@@ -883,7 +845,6 @@
/// CHECK: Return [<<Const0>>]
public static int $noinline$NegNeg2(int arg) {
- if (doThrow) { throw new Error(); }
int temp = -arg;
return temp + -temp;
}
@@ -911,7 +872,6 @@
/// CHECK-NOT: Sub
public static long $noinline$NegNeg3(long arg) {
- if (doThrow) { throw new Error(); }
return 0 - -arg;
}
@@ -938,7 +898,6 @@
/// CHECK-NOT: Neg
public static int $noinline$NegSub1(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
return -(arg1 - arg2);
}
@@ -971,7 +930,6 @@
/// CHECK-DAG: Return [<<Or>>]
public static int $noinline$NegSub2(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
int temp = arg1 - arg2;
return -temp | -temp;
}
@@ -996,7 +954,6 @@
/// CHECK-NOT: Xor
public static long $noinline$NotNot1(long arg) {
- if (doThrow) { throw new Error(); }
return ~~arg;
}
@@ -1022,7 +979,6 @@
/// CHECK-NOT: Xor
public static int $noinline$NotNot2(int arg) {
- if (doThrow) { throw new Error(); }
int temp = ~arg;
return temp + ~temp;
}
@@ -1050,7 +1006,6 @@
/// CHECK-NOT: Sub
public static int $noinline$SubNeg1(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
return -arg1 - arg2;
}
@@ -1086,7 +1041,6 @@
/// CHECK-NOT: Add
public static int $noinline$SubNeg2(int arg1, int arg2) {
- if (doThrow) { throw new Error(); }
int temp = -arg1;
return (temp - arg2) | (temp - arg2);
}
@@ -1122,7 +1076,6 @@
/// CHECK: Goto
public static long $noinline$SubNeg3(long arg1, long arg2) {
- if (doThrow) { throw new Error(); }
long res = 0;
long temp = -arg1;
for (long i = 0; i < 1; i++) {
@@ -1146,7 +1099,6 @@
/// CHECK-DAG: Return [<<True>>]
public static boolean $noinline$EqualBoolVsIntConst(boolean arg) {
- if (doThrow) { throw new Error(); }
// Make calls that will be inlined to make sure the instruction simplifier
// sees the simplification (dead code elimination will also try to simplify it).
return (arg ? $inline$ReturnArg(0) : $inline$ReturnArg(1)) != 2;
@@ -1171,7 +1123,6 @@
/// CHECK-DAG: Return [<<False>>]
public static boolean $noinline$NotEqualBoolVsIntConst(boolean arg) {
- if (doThrow) { throw new Error(); }
// Make calls that will be inlined to make sure the instruction simplifier
// sees the simplification (dead code elimination will also try to simplify it).
return (arg ? $inline$ReturnArg(0) : $inline$ReturnArg(1)) == 2;
@@ -1232,7 +1183,6 @@
}
public static boolean $noinline$NotNotBool(boolean arg) {
- if (doThrow) { throw new Error(); }
return !(NegateValue(arg));
}
@@ -1252,7 +1202,6 @@
/// CHECK-NOT: Div
public static float $noinline$Div2(float arg) {
- if (doThrow) { throw new Error(); }
return arg / 2.0f;
}
@@ -1271,7 +1220,6 @@
/// CHECK-START: double Main.$noinline$Div2(double) instruction_simplifier (after)
/// CHECK-NOT: Div
public static double $noinline$Div2(double arg) {
- if (doThrow) { throw new Error(); }
return arg / 2.0;
}
@@ -1291,7 +1239,6 @@
/// CHECK-NOT: Div
public static float $noinline$DivMP25(float arg) {
- if (doThrow) { throw new Error(); }
return arg / -0.25f;
}
@@ -1310,7 +1257,6 @@
/// CHECK-START: double Main.$noinline$DivMP25(double) instruction_simplifier (after)
/// CHECK-NOT: Div
public static double $noinline$DivMP25(double arg) {
- if (doThrow) { throw new Error(); }
return arg / -0.25f;
}
@@ -1330,7 +1276,6 @@
/// CHECK-NEXT: Add [<<Arg>>,<<Shift>>]
public static int $noinline$mulPow2Plus1(int arg) {
- if (doThrow) { throw new Error(); }
return arg * 9;
}
@@ -1350,7 +1295,6 @@
/// CHECK-NEXT: Sub [<<Shift>>,<<Arg>>]
public static long $noinline$mulPow2Minus1(long arg) {
- if (doThrow) { throw new Error(); }
return arg * 31;
}
@@ -1358,14 +1302,12 @@
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
- /// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Field>>,<<Const1>>]
/// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Select>>]
/// CHECK-START: int Main.$noinline$booleanFieldNotEqualOne() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1373,7 +1315,6 @@
/// CHECK-DAG: Return [<<Select>>]
public static int $noinline$booleanFieldNotEqualOne() {
- if (doThrow) { throw new Error(); }
return (booleanField == $inline$true()) ? 13 : 54;
}
@@ -1381,14 +1322,12 @@
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
- /// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<NE:z\d+>> Equal [<<Field>>,<<Const0>>]
/// CHECK-DAG: <<Select:i\d+>> Select [<<Const13>>,<<Const54>>,<<NE>>]
/// CHECK-DAG: Return [<<Select>>]
/// CHECK-START: int Main.$noinline$booleanFieldEqualZero() instruction_simplifier$after_inlining (after)
- /// CHECK-DAG: <<doThrow:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Field:z\d+>> StaticFieldGet
/// CHECK-DAG: <<Const13:i\d+>> IntConstant 13
/// CHECK-DAG: <<Const54:i\d+>> IntConstant 54
@@ -1396,7 +1335,6 @@
/// CHECK-DAG: Return [<<Select>>]
public static int $noinline$booleanFieldEqualZero() {
- if (doThrow) { throw new Error(); }
return (booleanField != $inline$false()) ? 13 : 54;
}
@@ -1425,7 +1363,6 @@
// LessThanOrEqual instructions.
public static int $noinline$intConditionNotEqualOne(int i) {
- if (doThrow) { throw new Error(); }
return ((i > 42) == $inline$true()) ? 13 : 54;
}
@@ -1454,7 +1391,6 @@
// LessThanOrEqual instructions.
public static int $noinline$intConditionEqualZero(int i) {
- if (doThrow) { throw new Error(); }
return ((i > 42) != $inline$false()) ? 13 : 54;
}
@@ -1473,7 +1409,6 @@
/// CHECK-DAG: Return [<<Select>>]
public static int $noinline$floatConditionNotEqualOne(float f) {
- if (doThrow) { throw new Error(); }
return ((f > 42.0f) == true) ? 13 : 54;
}
@@ -1490,7 +1425,6 @@
/// CHECK-DAG: Return [<<Select>>]
public static int $noinline$doubleConditionEqualZero(double d) {
- if (doThrow) { throw new Error(); }
return ((d > 42.0) != false) ? 13 : 54;
}
@@ -1508,7 +1442,6 @@
/// CHECK-NOT: TypeConversion
public static int $noinline$intToDoubleToInt(int value) {
- if (doThrow) { throw new Error(); }
// Lossless conversion followed by a conversion back.
return (int) (double) value;
}
@@ -1527,7 +1460,6 @@
/// CHECK-NOT: TypeConversion
public static String $noinline$intToDoubleToIntPrint(int value) {
- if (doThrow) { throw new Error(); }
// Lossless conversion followed by a conversion back
// with another use of the intermediate result.
double d = (double) value;
@@ -1549,7 +1481,6 @@
/// CHECK-NOT: TypeConversion
public static int $noinline$byteToDoubleToInt(byte value) {
- if (doThrow) { throw new Error(); }
// Lossless conversion followed by another conversion, use implicit conversion.
return (int) (double) value;
}
@@ -1570,7 +1501,6 @@
/// CHECK-NOT: TypeConversion
public static int $noinline$floatToDoubleToInt(float value) {
- if (doThrow) { throw new Error(); }
// Lossless conversion followed by another conversion.
return (int) (double) value;
}
@@ -1586,7 +1516,6 @@
/// CHECK-DAG: {{i\d+}} TypeConversion [<<Double>>]
public static String $noinline$floatToDoubleToIntPrint(float value) {
- if (doThrow) { throw new Error(); }
// Lossless conversion followed by another conversion with
// an extra use of the intermediate result.
double d = (double) value;
@@ -1609,7 +1538,6 @@
/// CHECK-NOT: TypeConversion
public static short $noinline$byteToDoubleToShort(byte value) {
- if (doThrow) { throw new Error(); }
// Originally, this is byte->double->int->short. The first conversion is lossless,
// so we merge this with the second one to byte->int which we omit as it's an implicit
// conversion. Then we eliminate the resulting byte->short as an implicit conversion.
@@ -1633,7 +1561,6 @@
/// CHECK-NOT: TypeConversion
public static short $noinline$charToDoubleToShort(char value) {
- if (doThrow) { throw new Error(); }
// Originally, this is char->double->int->short. The first conversion is lossless,
// so we merge this with the second one to char->int which we omit as it's an implicit
// conversion. Then we are left with the resulting char->short conversion.
@@ -1653,7 +1580,6 @@
/// CHECK-DAG: Return [<<Short>>]
public static short $noinline$floatToIntToShort(float value) {
- if (doThrow) { throw new Error(); }
// Lossy FP to integral conversion followed by another conversion: no simplification.
return (short) value;
}
@@ -1671,7 +1597,6 @@
/// CHECK-DAG: Return [<<Int>>]
public static int $noinline$intToFloatToInt(int value) {
- if (doThrow) { throw new Error(); }
// Lossy integral to FP conversion followed another conversion: no simplification.
return (int) (float) value;
}
@@ -1689,7 +1614,6 @@
/// CHECK-DAG: Return [<<Double>>]
public static double $noinline$longToIntToDouble(long value) {
- if (doThrow) { throw new Error(); }
// Lossy long-to-int conversion followed an integral to FP conversion: no simplification.
return (double) (int) value;
}
@@ -1707,7 +1631,6 @@
/// CHECK-DAG: Return [<<Long>>]
public static long $noinline$longToIntToLong(long value) {
- if (doThrow) { throw new Error(); }
// Lossy long-to-int conversion followed an int-to-long conversion: no simplification.
return (long) (int) value;
}
@@ -1723,7 +1646,6 @@
/// CHECK-DAG: Return [<<Arg>>]
public static short $noinline$shortToCharToShort(short value) {
- if (doThrow) { throw new Error(); }
// Integral conversion followed by non-widening integral conversion to original type.
return (short) (char) value;
}
@@ -1739,7 +1661,6 @@
/// CHECK-DAG: Return [<<Arg>>]
public static int $noinline$shortToLongToInt(short value) {
- if (doThrow) { throw new Error(); }
// Integral conversion followed by non-widening integral conversion, use implicit conversion.
return (int) (long) value;
}
@@ -1756,7 +1677,6 @@
/// CHECK-DAG: Return [<<Byte>>]
public static byte $noinline$shortToCharToByte(short value) {
- if (doThrow) { throw new Error(); }
// Integral conversion followed by non-widening integral conversion losing bits
// from the original type. Simplify to use only one conversion.
return (byte) (char) value;
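Since byte narrowing keeps only the low 8 bits, the intermediate char conversion cannot affect the result. A standalone check (hypothetical class name, not part of this commit):

    // (byte)(char) s and (byte) s share the same low 8 bits for every short.
    public class NarrowingMergeCheck {
      public static void main(String[] args) {
        for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; i++) {
          short s = (short) i;
          if ((byte) (char) s != (byte) s) throw new AssertionError("mismatch at " + s);
        }
        System.out.println("char intermediate is redundant before byte narrowing");
      }
    }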
@@ -1773,7 +1693,6 @@
/// CHECK-DAG: {{b\d+}} TypeConversion [<<Char>>]
public static String $noinline$shortToCharToBytePrint(short value) {
- if (doThrow) { throw new Error(); }
// Integral conversion followed by non-widening integral conversion losing bits
// from the original type with an extra use of the intermediate result.
char c = (char) value;
@@ -1781,6 +1700,62 @@
return "c=" + ((int) c) + ", b=" + ((int) b); // implicit conversions.
}
+ /// CHECK-START: long Main.$noinline$intAndSmallLongConstant(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:j\d+>> LongConstant -12345678
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Long>>,<<Mask>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.$noinline$intAndSmallLongConstant(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:i\d+>> IntConstant -12345678
+ /// CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<Mask>>]
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Long>>]
+
+ public static long $noinline$intAndSmallLongConstant(int value) {
+ return value & -12345678L; // Shall be simplified (constant is 32-bit).
+ }
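The rewrite is sound because ANDing two sign-extended 32-bit values equals sign-extending the 32-bit AND. A standalone check (hypothetical class name, not part of this commit):

    // -12345678L is the sign extension of the int -12345678, so the And can
    // be performed in 32 bits and the result widened afterwards.
    public class AndNarrowingCheck {
      public static void main(String[] args) {
        int[] samples = {0x55555555, 0xaaaaaaaa, 0, -1, Integer.MIN_VALUE, Integer.MAX_VALUE};
        for (int v : samples) {
          long wide = (long) v & -12345678L;    // shape before simplification
          long narrow = (long) (v & -12345678); // shape after simplification
          if (wide != narrow) throw new AssertionError("mismatch at " + v);
        }
        System.out.println("0x" + Long.toHexString((long) 0x55555555 & -12345678L)); // 55411410
      }
    }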
+
+ /// CHECK-START: long Main.$noinline$intAndLargeLongConstant(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:j\d+>> LongConstant 9876543210
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Long>>,<<Mask>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.$noinline$intAndLargeLongConstant(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:j\d+>> LongConstant 9876543210
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Long>>,<<Mask>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ public static long $noinline$intAndLargeLongConstant(int value) {
+ return value & 9876543210L; // Shall not be simplified (constant is not 32-bit).
+ }
+
+ /// CHECK-START: long Main.$noinline$intShr28And15L(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Shift:i\d+>> IntConstant 28
+ /// CHECK-DAG: <<Mask:j\d+>> LongConstant 15
+ /// CHECK-DAG: <<Shifted:i\d+>> Shr [<<Arg>>,<<Shift>>]
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Shifted>>]
+ /// CHECK-DAG: <<And:j\d+>> And [<<Long>>,<<Mask>>]
+ /// CHECK-DAG: Return [<<And>>]
+
+ /// CHECK-START: long Main.$noinline$intShr28And15L(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Shift:i\d+>> IntConstant 28
+ /// CHECK-DAG: <<Shifted:i\d+>> UShr [<<Arg>>,<<Shift>>]
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Shifted>>]
+ /// CHECK-DAG: Return [<<Long>>]
+
+ public static long $noinline$intShr28And15L(int value) {
+ return (value >> 28) & 15L;
+ }
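Here the AND keeps only the low four bits, and value >>> 28 already lies in [0, 15], so the arithmetic shift can become a logical shift and the mask disappears. A standalone check (hypothetical class name, not part of this commit):

    // ((v >> 28) & 15L) equals (long) (v >>> 28) for all sign patterns.
    public class ShrToUShrCheck {
      public static void main(String[] args) {
        int[] samples = {0x76543210, 0x80000000, 0xffffffff, 0, 0x7fffffff};
        for (int v : samples) {
          long before = (v >> 28) & 15L;   // shape before simplification
          long after = (long) (v >>> 28);  // shape after simplification
          if (before != after) throw new AssertionError(Integer.toHexString(v));
        }
        System.out.println("Shr+And agrees with UShr on all samples");
      }
    }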
+
/// CHECK-START: byte Main.$noinline$longAnd0xffToByte(long) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<Mask:j\d+>> LongConstant 255
@@ -1798,7 +1773,6 @@
/// CHECK-NOT: And
public static byte $noinline$longAnd0xffToByte(long value) {
- if (doThrow) { throw new Error(); }
return (byte) (value & 0xff);
}
@@ -1818,7 +1792,6 @@
/// CHECK-NOT: And
public static char $noinline$intAnd0x1ffffToChar(int value) {
- if (doThrow) { throw new Error(); }
// Keeping all significant bits and one more.
return (char) (value & 0x1ffff);
}
@@ -1838,7 +1811,6 @@
/// CHECK-DAG: Return [<<Short>>]
public static short $noinline$intAnd0x17fffToShort(int value) {
- if (doThrow) { throw new Error(); }
// No simplification: clearing a significant bit.
return (short) (value & 0x17fff);
}
@@ -1857,7 +1829,6 @@
/// CHECK-DAG: Return [<<Double>>]
public static double $noinline$shortAnd0xffffToShortToDouble(short value) {
- if (doThrow) { throw new Error(); }
short same = (short) (value & 0xffff);
return (double) same;
}
@@ -1873,7 +1844,6 @@
/// CHECK-DAG: <<GE:z\d+>> GreaterThanOrEqual [<<Arg>>,<<Const42>>]
public static int $noinline$intReverseCondition(int i) {
- if (doThrow) { throw new Error(); }
return (42 > i) ? 13 : 54;
}
@@ -1888,12 +1858,10 @@
/// CHECK-DAG: <<EQ:z\d+>> Equal [<<Result>>,<<Const42>>]
public static int $noinline$intReverseConditionNaN(int i) {
- if (doThrow) { throw new Error(); }
return (42 != Math.sqrt(i)) ? 13 : 54;
}
public static int $noinline$runSmaliTest(String name, boolean input) {
- if (doThrow) { throw new Error(); }
try {
Class<?> c = Class.forName("SmaliTests");
Method m = c.getMethod(name, boolean.class);
@@ -1904,7 +1872,6 @@
}
public static boolean $noinline$runSmaliTestBoolean(String name, boolean input) {
- if (doThrow) { throw new Error(); }
try {
Class<?> c = Class.forName("SmaliTests");
Method m = c.getMethod(name, boolean.class);
@@ -1915,7 +1882,6 @@
}
public static int $noinline$runSmaliTestInt(String name, int arg) {
- if (doThrow) { throw new Error(); }
try {
Class<?> c = Class.forName("SmaliTests");
Method m = c.getMethod(name, int.class);
@@ -1926,7 +1892,6 @@
}
public static long $noinline$runSmaliTestLong(String name, long arg) {
- if (doThrow) { throw new Error(); }
try {
Class<?> c = Class.forName("SmaliTests");
Method m = c.getMethod(name, long.class);
@@ -1951,7 +1916,6 @@
/// CHECK-DAG: Return [<<Shl>>]
public static int $noinline$intUnnecessaryShiftMasking(int value, int shift) {
- if (doThrow) { throw new Error(); }
return value << (shift & 31);
}
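These masking tests rely on JLS 15.19: an int shift uses only the five low bits of its distance (six for a long shift), so the explicit "& 31" is redundant. A standalone check (hypothetical class name, not part of this commit):

    // Java already reduces the distance mod 32 for int shifts, so masking
    // with 31 never changes the result.
    public class ShiftMaskCheck {
      public static void main(String[] args) {
        int v = 0xabcdef01;
        for (int shift = 0; shift < 256; shift++) {
          if ((v << (shift & 31)) != (v << shift)) {
            throw new AssertionError("mismatch at shift " + shift);
          }
        }
        System.out.println("masked and unmasked int shifts agree");
      }
    }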
@@ -1970,7 +1934,6 @@
/// CHECK-DAG: Return [<<Shr>>]
public static long $noinline$longUnnecessaryShiftMasking(long value, int shift) {
- if (doThrow) { throw new Error(); }
return value >> (shift & 63);
}
@@ -1989,7 +1952,6 @@
/// CHECK-DAG: Return [<<UShr>>]
public static int $noinline$intUnnecessaryWiderShiftMasking(int value, int shift) {
- if (doThrow) { throw new Error(); }
return value >>> (shift & 0xff);
}
@@ -2010,7 +1972,6 @@
/// CHECK-DAG: Return [<<Shl>>]
public static long $noinline$longSmallerShiftMasking(long value, int shift) {
- if (doThrow) { throw new Error(); }
return value << (shift & 3);
}
@@ -2033,11 +1994,86 @@
/// CHECK-DAG: Return [<<Add>>]
public static int $noinline$otherUseOfUnnecessaryShiftMasking(int value, int shift) {
- if (doThrow) { throw new Error(); }
int temp = shift & 31;
return (value >> temp) + temp;
}
+ /// CHECK-START: int Main.$noinline$intUnnecessaryShiftModifications(int, int) instruction_simplifier (before)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const32:i\d+>> IntConstant 32
+ /// CHECK-DAG: <<Const64:i\d+>> IntConstant 64
+ /// CHECK-DAG: <<Const96:i\d+>> IntConstant 96
+ /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const32>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const64>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const96>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const128>>]
+ /// CHECK-DAG: <<Conv:b\d+>> TypeConversion [<<Shift>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$intUnnecessaryShiftModifications(int, int) instruction_simplifier (after)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: Shl [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Shift>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Shift>>]
+
+ public static int $noinline$intUnnecessaryShiftModifications(int value, int shift) {
+ int c128 = 128;
+ return (value << (shift | 32)) +
+ (value >> (shift ^ 64)) +
+ (value >>> (shift + 96)) +
+ (value << (shift - c128)) + // Needs a named constant to generate Sub.
+ (value >> ((byte) shift));
+ }
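All five modifications preserve the low five bits of the shift count: the OR/XOR/ADD/SUB operands are multiples of 32, and the byte cast keeps the low 8 bits. A standalone check (hypothetical class name, not part of this commit):

    // Each modified shift count behaves exactly like the plain count because
    // int shifts only consume the low five bits of the distance.
    public class ShiftModificationCheck {
      public static void main(String[] args) {
        int v = 0xabcdef01;
        for (int s = 0; s < 32; s++) {
          boolean ok = (v << (s | 32)) == (v << s)
              && (v >> (s ^ 64)) == (v >> s)
              && (v >>> (s + 96)) == (v >>> s)
              && (v << (s - 128)) == (v << s)
              && (v >> ((byte) s)) == (v >> s);
          if (!ok) throw new AssertionError("mismatch at shift " + s);
        }
        System.out.println("modified shift counts behave like the plain count");
      }
    }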
+
+ /// CHECK-START: int Main.$noinline$intNecessaryShiftModifications(int, int) instruction_simplifier (before)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ /// CHECK-DAG: <<Const65:i\d+>> IntConstant 65
+ /// CHECK-DAG: <<Const97:i\d+>> IntConstant 97
+ /// CHECK-DAG: <<Const129:i\d+>> IntConstant 129
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const33>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const65>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const97>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const129>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+
+ /// CHECK-START: int Main.$noinline$intNecessaryShiftModifications(int, int) instruction_simplifier (after)
+ /// CHECK: <<Value:i\d+>> ParameterValue
+ /// CHECK: <<Shift:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ /// CHECK-DAG: <<Const65:i\d+>> IntConstant 65
+ /// CHECK-DAG: <<Const97:i\d+>> IntConstant 97
+ /// CHECK-DAG: <<Const129:i\d+>> IntConstant 129
+ /// CHECK-DAG: <<Or:i\d+>> Or [<<Shift>>,<<Const33>>]
+ /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Shift>>,<<Const65>>]
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Shift>>,<<Const97>>]
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Shift>>,<<Const129>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Or>>]
+ /// CHECK-DAG: Shr [<<Value>>,<<Xor>>]
+ /// CHECK-DAG: UShr [<<Value>>,<<Add>>]
+ /// CHECK-DAG: Shl [<<Value>>,<<Sub>>]
+
+ public static int $noinline$intNecessaryShiftModifications(int value, int shift) {
+ int c129 = 129;
+ return (value << (shift | 33)) +
+ (value >> (shift ^ 65)) +
+ (value >>> (shift + 97)) +
+ (value << (shift - c129)); // Needs a named constant to generate Sub.
+ }
+
/// CHECK-START: int Main.$noinline$intAddSubSimplifyArg1(int, int) instruction_simplifier (before)
/// CHECK: <<X:i\d+>> ParameterValue
/// CHECK: <<Y:i\d+>> ParameterValue
@@ -2052,7 +2088,6 @@
/// CHECK-DAG: Return [<<Y>>]
public static int $noinline$intAddSubSimplifyArg1(int x, int y) {
- if (doThrow) { throw new Error(); }
int sum = x + y;
return sum - x;
}
@@ -2071,7 +2106,6 @@
/// CHECK-DAG: Return [<<X>>]
public static int $noinline$intAddSubSimplifyArg2(int x, int y) {
- if (doThrow) { throw new Error(); }
int sum = x + y;
return sum - y;
}
@@ -2090,7 +2124,6 @@
/// CHECK-DAG: Return [<<X>>]
public static int $noinline$intSubAddSimplifyLeft(int x, int y) {
- if (doThrow) { throw new Error(); }
int sub = x - y;
return sub + y;
}
@@ -2109,7 +2142,6 @@
/// CHECK-DAG: Return [<<X>>]
public static int $noinline$intSubAddSimplifyRight(int x, int y) {
- if (doThrow) { throw new Error(); }
int sub = x - y;
return y + sub;
}
@@ -2129,7 +2161,6 @@
/// CHECK-DAG: Return [<<Res>>]
public static float $noinline$floatAddSubSimplifyArg1(float x, float y) {
- if (doThrow) { throw new Error(); }
float sum = x + y;
return sum - x;
}
@@ -2149,7 +2180,6 @@
/// CHECK-DAG: Return [<<Res>>]
public static float $noinline$floatAddSubSimplifyArg2(float x, float y) {
- if (doThrow) { throw new Error(); }
float sum = x + y;
return sum - y;
}
@@ -2169,7 +2199,6 @@
/// CHECK-DAG: Return [<<Res>>]
public static float $noinline$floatSubAddSimplifyLeft(float x, float y) {
- if (doThrow) { throw new Error(); }
float sub = x - y;
return sub + y;
}
@@ -2189,7 +2218,6 @@
/// CHECK-DAG: Return [<<Res>>]
public static float $noinline$floatSubAddSimplifyRight(float x, float y) {
- if (doThrow) { throw new Error(); }
float sub = x - y;
return y + sub;
}
@@ -2231,9 +2259,9 @@
assertIntEquals(1, $noinline$NegSub1(arg, arg + 1));
assertIntEquals(1, $noinline$NegSub2(arg, arg + 1));
assertLongEquals(arg, $noinline$NotNot1(arg));
- assertLongEquals(arg, $noinline$runSmaliTestLong("NotNot1", arg));
+ assertLongEquals(arg, $noinline$runSmaliTestLong("$noinline$NotNot1", arg));
assertIntEquals(-1, $noinline$NotNot2(arg));
- assertIntEquals(-1, $noinline$runSmaliTestInt("NotNot2", arg));
+ assertIntEquals(-1, $noinline$runSmaliTestInt("$noinline$NotNot2", arg));
assertIntEquals(-(arg + arg + 1), $noinline$SubNeg1(arg, arg + 1));
assertIntEquals(-(arg + arg + 1), $noinline$SubNeg2(arg, arg + 1));
assertLongEquals(-(2 * arg + 1), $noinline$SubNeg3(arg, arg + 1));
@@ -2242,9 +2270,9 @@
assertBooleanEquals(false, $noinline$NotEqualBoolVsIntConst(false));
assertBooleanEquals(false, $noinline$NotEqualBoolVsIntConst(false));
assertBooleanEquals(true, $noinline$NotNotBool(true));
- assertBooleanEquals(true, $noinline$runSmaliTestBoolean("NotNotBool", true));
+ assertBooleanEquals(true, $noinline$runSmaliTestBoolean("$noinline$NotNotBool", true));
assertBooleanEquals(false, $noinline$NotNotBool(false));
- assertBooleanEquals(false, $noinline$runSmaliTestBoolean("NotNotBool", false));
+ assertBooleanEquals(false, $noinline$runSmaliTestBoolean("$noinline$NotNotBool", false));
assertFloatEquals(50.0f, $noinline$Div2(100.0f));
assertDoubleEquals(75.0, $noinline$Div2(150.0));
assertFloatEquals(-400.0f, $noinline$DivMP25(100.0f));
@@ -2323,6 +2351,12 @@
assertStringEquals("c=1023, b=-1", $noinline$shortToCharToBytePrint((short) 1023));
assertStringEquals("c=65535, b=-1", $noinline$shortToCharToBytePrint((short) -1));
+ assertLongEquals(0x55411410L, $noinline$intAndSmallLongConstant(0x55555555));
+ assertLongEquals(0xffffffffaa028aa2L, $noinline$intAndSmallLongConstant(0xaaaaaaaa));
+ assertLongEquals(0x44101440L, $noinline$intAndLargeLongConstant(0x55555555));
+ assertLongEquals(0x208a002aaL, $noinline$intAndLargeLongConstant(0xaaaaaaaa));
+ assertLongEquals(7L, $noinline$intShr28And15L(0x76543210));
+
assertIntEquals(0x21, $noinline$longAnd0xffToByte(0x1234432112344321L));
assertIntEquals(0, $noinline$longAnd0xffToByte(Long.MIN_VALUE));
assertIntEquals(-1, $noinline$longAnd0xffToByte(Long.MAX_VALUE));
@@ -2363,14 +2397,22 @@
assertIntEquals(26, $noinline$runSmaliTestInt("SubSubConst3", 5));
assertIntEquals(0x5e6f7808, $noinline$intUnnecessaryShiftMasking(0xabcdef01, 3));
assertIntEquals(0x5e6f7808, $noinline$intUnnecessaryShiftMasking(0xabcdef01, 3 + 32));
- assertLongEquals(0xffffffffffffeaf3L, $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50));
- assertLongEquals(0xffffffffffffeaf3L, $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50 + 64));
+ assertLongEquals(0xffffffffffffeaf3L,
+ $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50));
+ assertLongEquals(0xffffffffffffeaf3L,
+ $noinline$longUnnecessaryShiftMasking(0xabcdef0123456789L, 50 + 64));
assertIntEquals(0x2af37b, $noinline$intUnnecessaryWiderShiftMasking(0xabcdef01, 10));
assertIntEquals(0x2af37b, $noinline$intUnnecessaryWiderShiftMasking(0xabcdef01, 10 + 128));
- assertLongEquals(0xaf37bc048d159e24L, $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2));
- assertLongEquals(0xaf37bc048d159e24L, $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2 + 256));
+ assertLongEquals(0xaf37bc048d159e24L,
+ $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2));
+ assertLongEquals(0xaf37bc048d159e24L,
+ $noinline$longSmallerShiftMasking(0xabcdef0123456789L, 2 + 256));
assertIntEquals(0xfffd5e7c, $noinline$otherUseOfUnnecessaryShiftMasking(0xabcdef01, 13));
assertIntEquals(0xfffd5e7c, $noinline$otherUseOfUnnecessaryShiftMasking(0xabcdef01, 13 + 512));
+ assertIntEquals(0x5f49eb48, $noinline$intUnnecessaryShiftModifications(0xabcdef01, 2));
+ assertIntEquals(0xbd4c29b0, $noinline$intUnnecessaryShiftModifications(0xabcdef01, 3));
+ assertIntEquals(0xc0fed1ca, $noinline$intNecessaryShiftModifications(0xabcdef01, 2));
+ assertIntEquals(0x03578ebc, $noinline$intNecessaryShiftModifications(0xabcdef01, 3));
assertIntEquals(654321, $noinline$intAddSubSimplifyArg1(arg, 654321));
assertIntEquals(arg, $noinline$intAddSubSimplifyArg2(arg, 654321));
diff --git a/test/463-checker-boolean-simplifier/build b/test/463-checker-boolean-simplifier/build
index 49292c9..3721955 100755
--- a/test/463-checker-boolean-simplifier/build
+++ b/test/463-checker-boolean-simplifier/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/476-checker-ctor-fence-redun-elim/build b/test/476-checker-ctor-fence-redun-elim/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/476-checker-ctor-fence-redun-elim/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/482-checker-loop-back-edge-use/build b/test/482-checker-loop-back-edge-use/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/482-checker-loop-back-edge-use/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/484-checker-register-hints/build b/test/484-checker-register-hints/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/484-checker-register-hints/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/530-checker-lse/build b/test/530-checker-lse/build
new file mode 100755
index 0000000..10ffcc5
--- /dev/null
+++ b/test/530-checker-lse/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali b/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
index 6612fae..ffb1853 100644
--- a/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
+++ b/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
@@ -62,3 +62,49 @@
return v1
.end method
+## CHECK-START: char SmaliTests.stringCharAtCatchPhiReturn(java.lang.String, int) instruction_simplifier (before)
+## CHECK-DAG: <<Int:i\d+>> IntConstant 0
+## CHECK-DAG: <<Char:c\d+>> InvokeVirtual intrinsic:StringCharAt
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Char>>,<<Int>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: char SmaliTests.stringCharAtCatchPhiReturn(java.lang.String, int) instruction_simplifier (after)
+## CHECK-DAG: <<String:l\d+>> ParameterValue
+## CHECK-DAG: <<Pos:i\d+>> ParameterValue
+## CHECK-DAG: <<Int:i\d+>> IntConstant 0
+## CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<String>>]
+## CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true
+## CHECK-DAG: <<Bounds:i\d+>> BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true
+## CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Bounds>>] is_string_char_at:true
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Char>>,<<Int>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: char SmaliTests.stringCharAtCatchPhiReturn(java.lang.String, int) instruction_simplifier (after)
+## CHECK-NOT: InvokeVirtual intrinsic:StringCharAt
+.method public static stringCharAtCatchPhiReturn(Ljava/lang/String;I)C
+ .registers 4
+
+ sget-boolean v1, LMain;->doThrow:Z
+
+ if-eqz v1, :cond_a
+ new-instance v1, Ljava/lang/Error;
+ invoke-direct {v1}, Ljava/lang/Error;-><init>()V
+ throw v1
+
+ :cond_a
+ :try_start_a
+ invoke-virtual {p0, p1}, Ljava/lang/String;->charAt(I)C
+ :try_end_d
+ .catch Ljava/lang/StringIndexOutOfBoundsException; {:try_start_a .. :try_end_d} :catch_f
+
+ move-result v1
+
+ :goto_e
+ return v1
+
+ :catch_f
+ move-exception v0
+
+ const/4 v1, 0x0
+ goto :goto_e
+.end method
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
index 6d3abb1..83a89a6 100644
--- a/test/536-checker-intrinsic-optimization/src/Main.java
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -171,8 +171,11 @@
/// CHECK-START: char Main.$opt$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (before)
/// CHECK-DAG: <<Int:i\d+>> IntConstant 0
/// CHECK-DAG: <<Char:c\d+>> InvokeVirtual intrinsic:StringCharAt
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Char>>,<<Int>>]
- /// CHECK-DAG: Return [<<Phi>>]
+
+ // The return value can come from a Phi if the two return paths are merged.
+ // See the Smali code for a more detailed verification.
+
+ /// CHECK-DAG: Return [{{(c|i)\d+}}]
/// CHECK-START: char Main.$opt$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
/// CHECK-DAG: <<String:l\d+>> ParameterValue
@@ -182,8 +185,7 @@
/// CHECK-DAG: <<Length:i\d+>> ArrayLength [<<NullCk>>] is_string_length:true
/// CHECK-DAG: <<Bounds:i\d+>> BoundsCheck [<<Pos>>,<<Length>>] is_string_char_at:true
/// CHECK-DAG: <<Char:c\d+>> ArrayGet [<<NullCk>>,<<Bounds>>] is_string_char_at:true
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Char>>,<<Int>>]
- /// CHECK-DAG: Return [<<Phi>>]
+ /// CHECK-DAG: Return [{{(c|i)\d+}}]
/// CHECK-START: char Main.$opt$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
/// CHECK-NOT: InvokeVirtual intrinsic:StringCharAt
diff --git a/test/549-checker-types-merge/build b/test/549-checker-types-merge/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/549-checker-types-merge/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/565-checker-doublenegbitwise/build b/test/565-checker-doublenegbitwise/build
index 49292c9..3721955 100755
--- a/test/565-checker-doublenegbitwise/build
+++ b/test/565-checker-doublenegbitwise/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/565-checker-rotate/build b/test/565-checker-rotate/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/565-checker-rotate/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/566-checker-signum/build b/test/566-checker-signum/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/566-checker-signum/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/567-checker-compare/build b/test/567-checker-compare/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/567-checker-compare/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/570-checker-osr/build b/test/570-checker-osr/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/570-checker-osr/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/586-checker-null-array-get/build b/test/586-checker-null-array-get/build
index 49292c9..3721955 100755
--- a/test/586-checker-null-array-get/build
+++ b/test/586-checker-null-array-get/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/593-checker-boolean-2-integral-conv/build b/test/593-checker-boolean-2-integral-conv/build
index 49292c9..3721955 100755
--- a/test/593-checker-boolean-2-integral-conv/build
+++ b/test/593-checker-boolean-2-integral-conv/build
@@ -20,4 +20,7 @@
# Also disable desugar because it is missing in jack platform builds.
export DESUGAR=false
+# See b/65168732
+export USE_D8=false
+
./default-build "$@"
diff --git a/test/597-deopt-invoke-stub/run b/test/597-deopt-invoke-stub/run
index bc04498..53b7c4c 100644
--- a/test/597-deopt-invoke-stub/run
+++ b/test/597-deopt-invoke-stub/run
@@ -14,5 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# We want to run in debuggable mode and compiled.
-exec ${RUN} --jit -Xcompiler-option --debuggable "${@}"
+# In order to test deoptimizing at the quick-to-interpreter bridge,
+# we want to run in debuggable mode with jit compilation.
+# We also bump the jit threshold up to 10000 to make sure that the method
+# that should be interpreted is not compiled.
+exec ${RUN} --jit --runtime-option -Xjitthreshold:10000 -Xcompiler-option --debuggable "${@}"
diff --git a/test/611-checker-simplify-if/build b/test/611-checker-simplify-if/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/611-checker-simplify-if/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/618-checker-induction/build b/test/618-checker-induction/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/618-checker-induction/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 418be30..f6d3bba 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -497,6 +497,13 @@
}
}
+ // Mix of 16-bit and 8-bit array references.
+ static void castAndNarrow(byte[] x, char[] y) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (byte) ((short) y[i] + 1);
+ }
+ }
+
public static void main(String[] args) {
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
@@ -650,6 +657,15 @@
expectEquals(2805, f[i]);
}
+ char[] cx = new char[259];
+ for (int i = 0; i < 259; i++) {
+ cx[i] = (char) (i - 100);
+ }
+ castAndNarrow(b1, cx);
+ for (int i = 0; i < 259; i++) {
+ expectEquals((byte)((short) cx[i] + 1), b1[i]);
+ }
+
System.out.println("passed");
}
diff --git a/test/624-checker-stringops/smali/Smali.smali b/test/624-checker-stringops/smali/Smali.smali
new file mode 100644
index 0000000..7b063c0
--- /dev/null
+++ b/test/624-checker-stringops/smali/Smali.smali
@@ -0,0 +1,212 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSmali;
+.super Ljava/lang/Object;
+
+## CHECK-START: int Smali.bufferLen2() instruction_simplifier (before)
+## CHECK-DAG: <<New:l\d+>> NewInstance
+## CHECK-DAG: <<String1:l\d+>> LoadString
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
+## CHECK-DAG: <<String2:l\d+>> LoadString
+## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<Append1>>]
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null1>>,<<String2>>] intrinsic:StringBufferAppend
+## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append2>>]
+## CHECK-DAG: InvokeVirtual [<<Null2>>] intrinsic:StringBufferLength
+
+## CHECK-START: int Smali.bufferLen2() instruction_simplifier (after)
+## CHECK-DAG: <<New:l\d+>> NewInstance
+## CHECK-DAG: <<String1:l\d+>> LoadString
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
+## CHECK-DAG: <<String2:l\d+>> LoadString
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBufferAppend
+## CHECK-DAG: InvokeVirtual [<<New>>] intrinsic:StringBufferLength
+.method public static bufferLen2()I
+ .registers 3
+
+ new-instance v0, Ljava/lang/StringBuffer;
+
+ invoke-direct {v0}, Ljava/lang/StringBuffer;-><init>()V
+
+ const-string v1, "x"
+ invoke-virtual {v0, v1}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
+ move-result-object v1
+
+ const-string v2, "x"
+ invoke-virtual {v1, v2}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
+ move-result-object v1
+
+ invoke-virtual {v1}, Ljava/lang/StringBuffer;->length()I
+ move-result v1
+
+ return v1
+.end method
+
+## CHECK-START: int Smali.builderLen2() instruction_simplifier (before)
+## CHECK-DAG: <<New:l\d+>> NewInstance
+## CHECK-DAG: <<String1:l\d+>> LoadString
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<String2:l\d+>> LoadString
+## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>]
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>]
+## CHECK-DAG: InvokeVirtual [<<Null3>>] intrinsic:StringBuilderLength
+
+## CHECK-START: int Smali.builderLen2() instruction_simplifier (after)
+## CHECK-DAG: <<New:l\d+>> NewInstance
+## CHECK-DAG: <<String1:l\d+>> LoadString
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<String2:l\d+>> LoadString
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: InvokeVirtual [<<New>>] intrinsic:StringBuilderLength
+.method public static builderLen2()I
+ .registers 3
+
+ new-instance v0, Ljava/lang/StringBuilder;
+
+ invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V
+
+ const-string v1, "x"
+ invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v1
+
+ const-string v2, "x"
+ invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v1
+
+ invoke-virtual {v1}, Ljava/lang/StringBuilder;->length()I
+ move-result v1
+
+ return v1
+.end method
+
+## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (before)
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
+## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBufferLength loop:none
+
+## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (after)
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: InvokeVirtual [<<New>>] intrinsic:StringBufferLength loop:none
+.method public static bufferLoopAppender()I
+ .registers 4
+
+ new-instance v0, Ljava/lang/StringBuffer;
+
+ invoke-direct {v0}, Ljava/lang/StringBuffer;-><init>()V
+
+ const/4 v1, 0x0
+
+ :goto_6
+ const/16 v2, 0xa
+
+ if-ge v1, v2, :cond_1e
+
+ const-string v2, "x"
+ invoke-virtual {v0, v2}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
+ move-result-object v2
+
+ const-string v3, "y"
+ invoke-virtual {v2, v3}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
+ move-result-object v2
+
+ const-string v3, "z"
+ invoke-virtual {v2, v3}, Ljava/lang/StringBuffer;->append(Ljava/lang/String;)Ljava/lang/StringBuffer;
+
+ add-int/lit8 v1, v1, 0x1
+ goto :goto_6
+
+ :cond_1e
+ invoke-virtual {v0}, Ljava/lang/StringBuffer;->length()I
+
+ move-result v2
+
+ return v2
+.end method
+
+## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (before)
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
+## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBuilderLength loop:none
+
+## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (after)
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: InvokeVirtual [<<New>>] intrinsic:StringBuilderLength loop:none
+.method public static builderLoopAppender()I
+ .registers 4
+
+ new-instance v0, Ljava/lang/StringBuilder;
+
+ invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V
+
+ const/4 v1, 0x0
+
+ :goto_6
+ const/16 v2, 0xa
+
+ if-ge v1, v2, :cond_1e
+
+ const-string v2, "x"
+
+ invoke-virtual {v0, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v2
+ const-string v3, "y"
+
+ invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ move-result-object v2
+ const-string v3, "z"
+
+ invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+ add-int/lit8 v1, v1, 0x1
+
+ goto :goto_6
+
+ :cond_1e
+ invoke-virtual {v0}, Ljava/lang/StringBuilder;->length()I
+
+ move-result v2
+
+ return v2
+.end method
diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java
index 63da4f5..3aa6e56 100644
--- a/test/624-checker-stringops/src/Main.java
+++ b/test/624-checker-stringops/src/Main.java
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+import java.lang.reflect.Method;
/**
* Tests properties of some string operations represented by intrinsics.
@@ -105,12 +106,10 @@
/// CHECK-START: int Main.bufferLen2() instruction_simplifier (before)
/// CHECK-DAG: <<New:l\d+>> NewInstance
/// CHECK-DAG: <<String1:l\d+>> LoadString
- /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
+ /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
/// CHECK-DAG: <<String2:l\d+>> LoadString
- /// CHECK-DAG: <<Null1:l\d+>> NullCheck [<<Append1>>]
- /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null1>>,<<String2>>] intrinsic:StringBufferAppend
- /// CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append2>>]
- /// CHECK-DAG: InvokeVirtual [<<Null2>>] intrinsic:StringBufferLength
+ /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBufferAppend
+ /// CHECK-DAG: InvokeVirtual [{{l\d+}}] intrinsic:StringBufferLength
//
/// CHECK-START: int Main.bufferLen2() instruction_simplifier (after)
/// CHECK-DAG: <<New:l\d+>> NewInstance
@@ -124,6 +123,12 @@
return s.append("x").append("x").length();
}
+ static int bufferLen2Smali() throws Exception {
+ Class<?> c = Class.forName("Smali");
+ Method m = c.getMethod("bufferLen2");
+ return (Integer) m.invoke(null);
+ }
+
//
// Allows combining of returned "this". Also ensures that similar looking append() calls
// are not combined somehow through returned result.
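The simplification is possible because append() returns its receiver, so every value in the chain is reference-identical to the freshly allocated object and the interposed null checks can be rerouted to it. A standalone illustration (hypothetical class name, not part of this commit):

    // append() returns this, so the chained result is the same object as the
    // original builder and needs no fresh null check.
    public class AppendReturnsThis {
      public static void main(String[] args) {
        StringBuilder b = new StringBuilder();
        StringBuilder chained = b.append("x").append("x");
        System.out.println(chained == b); // true: same object
        System.out.println(b.length());   // 2
      }
    }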
@@ -131,12 +136,10 @@
/// CHECK-START: int Main.builderLen2() instruction_simplifier (before)
/// CHECK-DAG: <<New:l\d+>> NewInstance
/// CHECK-DAG: <<String1:l\d+>> LoadString
- /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+ /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
/// CHECK-DAG: <<String2:l\d+>> LoadString
- /// CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>]
- /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend
- /// CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>]
- /// CHECK-DAG: InvokeVirtual [<<Null3>>] intrinsic:StringBuilderLength
+ /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBuilderAppend
+ /// CHECK-DAG: InvokeVirtual [{{l\d+}}] intrinsic:StringBuilderLength
//
/// CHECK-START: int Main.builderLen2() instruction_simplifier (after)
/// CHECK-DAG: <<New:l\d+>> NewInstance
@@ -150,6 +153,12 @@
return s.append("x").append("x").length();
}
+ static int builderLen2Smali() throws Exception {
+ Class<?> c = Class.forName("Smali");
+ Method m = c.getMethod("builderLen2");
+ return (Integer) m.invoke(null);
+ }
+
//
// Similar situation in a loop.
//
@@ -159,13 +168,10 @@
/// CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
/// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend loop:<<Loop>>
/// CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
- /// CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
- /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
+ /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
/// CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
- /// CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
- /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
- /// CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
- /// CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBufferLength loop:none
+ /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [{{l\d+}},<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
+ /// CHECK-DAG: InvokeVirtual [{{l\d+}}] intrinsic:StringBufferLength loop:none
//
/// CHECK-START: int Main.bufferLoopAppender() instruction_simplifier (after)
/// CHECK-DAG: <<New:l\d+>> NewInstance loop:none
@@ -184,6 +190,12 @@
return b.length();
}
+ static int bufferLoopAppenderSmali() throws Exception {
+ Class<?> c = Class.forName("Smali");
+ Method m = c.getMethod("bufferLoopAppender");
+ return (Integer) m.invoke(null);
+ }
+
//
// Similar situation in a loop.
//
@@ -193,13 +205,10 @@
/// CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
/// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
/// CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
- /// CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
- /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+ /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
/// CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
- /// CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
- /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
- /// CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
- /// CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBuilderLength loop:none
+ /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [{{l\d+}},<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+ /// CHECK-DAG: InvokeVirtual [{{l\d+}}] intrinsic:StringBuilderLength loop:none
//
/// CHECK-START: int Main.builderLoopAppender() instruction_simplifier (after)
/// CHECK-DAG: <<New:l\d+>> NewInstance loop:none
@@ -218,6 +227,12 @@
return b.length();
}
+ static int builderLoopAppenderSmali() throws Exception {
+ Class<?> c = Class.forName("Smali");
+ Method m = c.getMethod("builderLoopAppender");
+ return (Integer) m.invoke(null);
+ }
+
//
// All calls in the loop-body and thus loop can be eliminated.
//
@@ -274,7 +289,7 @@
x.toString();
}
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
expectEquals(1865, liveIndexOf());
expectEquals(29, deadIndexOf());
@@ -291,9 +306,13 @@
expectEquals(598, indexOfExceptions(ABC, XYZ));
expectEquals(2, bufferLen2());
+ expectEquals(2, bufferLen2Smali());
expectEquals(2, builderLen2());
+ expectEquals(2, builderLen2Smali());
expectEquals(30, bufferLoopAppender());
+ expectEquals(30, bufferLoopAppenderSmali());
expectEquals(30, builderLoopAppender());
+ expectEquals(30, builderLoopAppenderSmali());
expectEquals(0, bufferDeadLoop());
expectEquals(0, builderDeadLoop());
diff --git a/test/638-checker-inline-caches/profile b/test/638-checker-inline-caches/profile
index 1ca6d7b..7756a16 100644
--- a/test/638-checker-inline-caches/profile
+++ b/test/638-checker-inline-caches/profile
@@ -1,6 +1,6 @@
-LMain;->inlineMonomorphicSubA(LSuper;)I+LSubA;
-LMain;->inlinePolymophicSubASubB(LSuper;)I+LSubA;,LSubB;
-LMain;->inlinePolymophicCrossDexSubASubC(LSuper;)I+LSubA;,LSubC;
-LMain;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;
-LMain;->inlineMissingTypes(LSuper;)I+missing_types
-LMain;->noInlineCache(LSuper;)I
+HSLMain;->inlineMonomorphicSubA(LSuper;)I+LSubA;
+HSLMain;->inlinePolymophicSubASubB(LSuper;)I+LSubA;,LSubB;
+HSLMain;->inlinePolymophicCrossDexSubASubC(LSuper;)I+LSubA;,LSubC;
+HSLMain;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;
+HSLMain;->inlineMissingTypes(LSuper;)I+missing_types
+HSLMain;->noInlineCache(LSuper;)I
diff --git a/test/643-checker-bogus-ic/profile b/test/643-checker-bogus-ic/profile
index cbf7796..540a935 100644
--- a/test/643-checker-bogus-ic/profile
+++ b/test/643-checker-bogus-ic/profile
@@ -1,2 +1,2 @@
-LMain;->inlineMonomorphic(LMain;)I+LUnrelated;
-LMain;->inlinePolymorphic(LMain;)I+LUnrelated;,LMain;
+SHLMain;->inlineMonomorphic(LMain;)I+LUnrelated;
+SHLMain;->inlinePolymorphic(LMain;)I+LUnrelated;,LMain;
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index c49d85d..57c51a6 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -131,6 +131,28 @@
}
}
+ /// CHECK-START: void Main.doitCastedChar(char[]) loop_optimization (before)
+ /// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: void Main.doitCastedChar(char[]) loop_optimization (after)
+ /// CHECK-DAG: Phi loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecLoad loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecAbs loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: Phi loop:<<Loop2:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop2>> outer_loop:none
+ //
+ /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
+ //
+ private static void doitCastedChar(char[] x) {
+ for (int i = 0; i < x.length; i++) {
+ x[i] = (char) Math.abs((short) x[i]);
+ }
+ }
+
/// CHECK-START: void Main.doitInt(int[]) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
@@ -298,7 +320,7 @@
xc[i] = (char) i;
}
doitChar(xc);
- for (int i = 0; i < 1024 *64; i++) {
+ for (int i = 0; i < 1024 * 64; i++) {
expectEquals32((char) Math.abs((char) i), xc[i]);
}
short[] xs = new short[1024 * 64];
@@ -309,6 +331,13 @@
for (int i = 0; i < 1024 * 64; i++) {
expectEquals32((short) Math.abs((short) i), xs[i]);
}
+ for (int i = 0; i < 1024 * 64; i++) {
+ xc[i] = (char) i;
+ }
+ doitCastedChar(xc);
+ for (int i = 0; i < 1024 * 64; i++) {
+ expectEquals32((char) Math.abs((short) i), xc[i]);
+ }
// Set up minint32, maxint32 and some others.
int[] xi = new int[8];
xi[0] = 0x80000000;
diff --git a/test/646-checker-hadd-alt-byte/src/Main.java b/test/646-checker-hadd-alt-byte/src/Main.java
index 7be3151..87f7688 100644
--- a/test/646-checker-hadd-alt-byte/src/Main.java
+++ b/test/646-checker-hadd-alt-byte/src/Main.java
@@ -43,21 +43,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -83,21 +83,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -121,21 +121,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -162,21 +162,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -200,7 +200,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
@@ -208,7 +208,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
@@ -216,7 +216,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -241,7 +241,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
@@ -249,7 +249,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
@@ -257,7 +257,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
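
These hunks replace the boolean unsigned: attribute of VecHalvingAdd with an explicit packed_type: (Int8 for the sign-extended byte idioms, Uint8 for the zero-extended ones); the rounded: attribute is unchanged. The diff elides the loop bodies, so the following Java sketch only reconstructs the general idioms the Checker assertions expect the loop optimizer to recognize; it is illustrative, not the test source.

  private static void halving_add_signed_sketch(byte[] b1, byte[] b2, byte[] bo) {
    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
    for (int i = 0; i < min_length; i++) {
      // Truncated signed halving add: floor((a + b) / 2) on sign-extended bytes.
      bo[i] = (byte) ((b1[i] + b2[i]) >> 1);
    }
  }

  private static void rounding_halving_add_unsigned_sketch(byte[] b1, byte[] b2, byte[] bo) {
    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
    for (int i = 0; i < min_length; i++) {
      // Rounded unsigned halving add: floor((a + b + 1) / 2); the & 0xff masks
      // zero-extend the operands, which is what selects Uint8 over Int8.
      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >> 1);
    }
  }
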
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
index 2799ea7..292ea1b 100644
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ b/test/646-checker-hadd-alt-char/src/Main.java
@@ -43,21 +43,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -83,21 +83,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
@@ -124,21 +124,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -165,21 +165,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
@@ -206,7 +206,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -214,7 +214,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -222,7 +222,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -247,7 +247,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -255,7 +255,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
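
In the char variants every VecHalvingAdd now carries packed_type:Uint16, since Java's char is an unsigned 16-bit type, so there is no signed idiom to distinguish. The recurring "HAnd has no impact" note refers to an explicit & 0xffff mask in some of the "also_unsigned" test bodies; on an already zero-extended char it is a no-op, and idiom recognition is unaffected. A hypothetical sketch of that shape (not the elided test source):

  private static void halving_add_also_unsigned_sketch(char[] b1, char[] b2, char[] bo) {
    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
    for (int i = 0; i < min_length; i++) {
      // The & 0xffff is redundant for char (HAnd has no impact), so the
      // unsigned halving-add idiom is still recognized as Uint16.
      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >> 1);
    }
  }
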
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
index 6cd102f..da94829 100644
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ b/test/646-checker-hadd-alt-short/src/Main.java
@@ -43,21 +43,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -83,21 +83,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -121,21 +121,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -162,21 +162,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -200,7 +200,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
@@ -208,7 +208,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
@@ -216,7 +216,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -241,7 +241,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
@@ -249,7 +249,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
@@ -257,7 +257,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
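
For short both signednesses occur: plain loads yield packed_type:Int16, while masking each operand with 0xffff first yields Uint16. The <<SMAX>>/<<UMAX>> operands of VecReplicateScalar correspond to constant second operands in the source, by analogy with the <<I127>>/<<I255>> constants in the byte tests presumably 0x7fff and 0xffff. A sketch of the unsigned constant variant, assuming a loop shaped like the elided test bodies:

  private static void halving_add_unsigned_constant_sketch(short[] b1, short[] bo) {
    int min_length = Math.min(bo.length, b1.length);
    for (int i = 0; i < min_length; i++) {
      // 0xffff plays the role of UMAX and is broadcast by VecReplicateScalar.
      bo[i] = (short) (((b1[i] & 0xffff) + 0xffff) >> 1);
    }
  }
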
diff --git a/test/646-checker-hadd-byte/src/Main.java b/test/646-checker-hadd-byte/src/Main.java
index a9e844c..f0adca3 100644
--- a/test/646-checker-hadd-byte/src/Main.java
+++ b/test/646-checker-hadd-byte/src/Main.java
@@ -40,21 +40,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -80,21 +80,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -118,21 +118,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -159,21 +159,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -197,7 +197,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
@@ -205,7 +205,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
@@ -213,7 +213,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -238,7 +238,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
@@ -246,7 +246,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
@@ -254,7 +254,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
int min_length = Math.min(bo.length, b1.length);
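
646-checker-hadd-byte repeats the same attribute change for the non-alt expression shapes. One property the idiom relies on: the scalar source computes the sum in int, so no byte lane can overflow before the shift, and a vector halving-add instruction preserves that by keeping the intermediate sum in wider precision. A quick illustrative check of the arithmetic (a hypothetical demo class, not part of the tests):

  public class HaddDemo {
    public static void main(String[] args) {
      int a = 2, b = 3;
      System.out.println((a + b) >> 1);      // 2: truncated halving add
      System.out.println((a + b + 1) >> 1);  // 3: rounding bumps the result
      System.out.println((127 + 127) >> 1);  // 127: extremes stay in range
    }
  }
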
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
index 22eb7cb..94030cc 100644
--- a/test/646-checker-hadd-char/src/Main.java
+++ b/test/646-checker-hadd-char/src/Main.java
@@ -40,21 +40,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -80,21 +80,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
@@ -121,21 +121,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -162,21 +162,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
@@ -203,7 +203,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -211,7 +211,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -219,7 +219,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -244,7 +244,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -252,7 +252,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
@@ -260,7 +260,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
// Note: HAnd has no impact (already a zero extension).
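
Throughout these files the Checker directives follow one pattern: CHECK-START-<ARCH> selects the compiler graph dumped after the named pass for a single backend, CHECK-DAG lines may match in any order, <<Name:regex>> binds a capture that later <<Name>> uses must equal, and {{regex}} matches without binding. A minimal, hypothetical directive pair showing the binding (not taken from the patch):

  /// CHECK-START-ARM64: void Main.example(byte[]) loop_optimization (after)
  /// CHECK-DAG: <<Ld:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
  /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Ld>>] loop:<<Loop>> outer_loop:none
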
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
index 756f8a8..4ed2356 100644
--- a/test/646-checker-hadd-short/src/Main.java
+++ b/test/646-checker-hadd-short/src/Main.java
@@ -40,21 +40,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -81,21 +81,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -122,21 +122,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -160,21 +160,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -198,21 +198,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -240,21 +240,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:false rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -282,21 +282,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -323,21 +323,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] unsigned:true rounded:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
@@ -362,7 +362,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
@@ -370,7 +370,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
@@ -378,7 +378,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:false rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_signed_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
@@ -403,7 +403,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
@@ -411,7 +411,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
@@ -419,7 +419,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] unsigned:true rounded:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<HAdd>>] loop:<<Loop>> outer_loop:none
private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
int min_length = Math.min(bo.length, b1.length);
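
For reference, the scalar idioms these halving-add tests vectorize follow the pattern sketched below: rounded:true corresponds to adding one before the shift, and the unsigned variants mask to 16 bits first, which is why their expectations carry packed_type:Uint16. This is a sketch only; the actual test methods are in the file above.

    // Scalar reference for the halving-add idioms (illustrative, not the test code).
    public class HalvingAddDemo {
      // Truncated halving add: matches VecHalvingAdd ... rounded:false.
      static short haddSigned(short a, short b) {
        return (short) ((a + b) >> 1);       // the int sum cannot overflow
      }
      // Rounded halving add: matches VecHalvingAdd ... rounded:true.
      static short rhaddSigned(short a, short b) {
        return (short) ((a + b + 1) >> 1);
      }
      // Unsigned variant: operands are zero-extended via & 0xffff, so the
      // checker expects packed_type:Uint16 for it.
      static int haddUnsigned(short a, short b) {
        return ((a & 0xffff) + (b & 0xffff)) >> 1;
      }
      public static void main(String[] args) {
        System.out.println(haddSigned((short) 5, (short) 6));     // 5
        System.out.println(rhaddSigned((short) 5, (short) 6));    // 6
        System.out.println(haddUnsigned((short) -1, (short) -1)); // 65535
      }
    }
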
diff --git a/test/648-inline-caches-unresolved/profile b/test/648-inline-caches-unresolved/profile
index 92c0a41..06bc8ad 100644
--- a/test/648-inline-caches-unresolved/profile
+++ b/test/648-inline-caches-unresolved/profile
@@ -1 +1 @@
-LMain;->inlineMonomorphicUnresolvedSuper(Ljava/lang/Object;)Ljava/lang/String;+LSubclass;
+SHLMain;->inlineMonomorphicUnresolvedSuper(Ljava/lang/Object;)Ljava/lang/String;+LSubclass;
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index 9643b90..44472a8 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -31,21 +31,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -69,21 +69,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMinUnsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -104,21 +104,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -142,21 +142,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMaxUnsigned(byte[], byte[], byte[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(byte[] x, byte[] y, byte[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -178,7 +178,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int8 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(byte[] x, byte[] y) {
int min = Math.min(x.length, y.length);
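
The byte min/max tests distinguish the signed and unsigned forms purely by the & 0xff masks in the source, which the vectorizer now records as Int8 versus Uint8. A scalar sketch of the two idioms (illustrative, not the test code):

    // Scalar reference for the byte min idioms above.
    public class ByteMinMaxDemo {
      // Signed min: checker expects VecMin ... packed_type:Int8.
      static byte minSigned(byte a, byte b) {
        return (byte) Math.min(a, b);
      }
      // Unsigned min: operands are zero-extended via & 0xff, so the checker
      // expects packed_type:Uint8.
      static byte minUnsigned(byte a, byte b) {
        return (byte) Math.min(a & 0xff, b & 0xff);
      }
      public static void main(String[] args) {
        System.out.println(minSigned((byte) -1, (byte) 1));   // -1
        System.out.println(minUnsigned((byte) -1, (byte) 1)); // 1 (0xff vs 0x01)
      }
    }
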
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index 8a0262c..93f21f8 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -31,21 +31,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -66,21 +66,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(char[], char[], char[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(char[] x, char[] y, char[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -102,7 +102,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(char[] x, char[] y) {
int min = Math.min(x.length, y.length);
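
In the char variant there are no masks at all: char is Java's only unsigned integral type, so plain Math.min/Math.max on char values is already an unsigned comparison, and every expectation in this file uses packed_type:Uint16. A small sketch:

    public class CharMinDemo {
      public static void main(String[] args) {
        char a = (char) 0xffff;  // 65535 when widened to int (zero extension)
        char b = (char) 0x0001;
        // The unsigned comparison falls out of Java's char semantics directly.
        System.out.println(Math.min(a, b));  // prints 1, not -1
      }
    }
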
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index ffbf73b..00569e4 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -31,21 +31,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMin(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -69,21 +69,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMinUnsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMinUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -104,21 +104,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMax(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -142,21 +142,21 @@
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-ARM64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-MIPS64: void Main.doitMaxUnsigned(short[], short[], short[]) loop_optimization (after)
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:true loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Uint16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMaxUnsigned(short[] x, short[] y, short[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
@@ -178,7 +178,7 @@
/// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I100>>] loop:none
/// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Get:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] unsigned:false loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get>>,<<Repl>>] packed_type:Int16 loop:<<Loop>> outer_loop:none
/// CHECK-DAG: VecStore [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin100(short[] x, short[] y) {
int min = Math.min(x.length, y.length);
diff --git a/test/660-checker-simd-sad-short3/expected.txt b/test/660-checker-simd-sad-short3/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/660-checker-simd-sad-short3/info.txt b/test/660-checker-simd-sad-short3/info.txt
new file mode 100644
index 0000000..b56c119
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/info.txt
@@ -0,0 +1 @@
+Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-short3/src/Main.java b/test/660-checker-simd-sad-short3/src/Main.java
new file mode 100644
index 0000000..c8850b4
--- /dev/null
+++ b/test/660-checker-simd-sad-short3/src/Main.java
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ *
+ * Some special cases: parameters, constants, invariants, casted computations.
+ */
+public class Main {
+
+ /// CHECK-START: int Main.sadShort2IntParamRight(short[], short) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntParamRight(short[], short) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Param>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntParamRight(short[] s, short param) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - param);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Param>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Param>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntParamLeft(short[] s, short param) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(param - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntConstRight(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant -32767 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Add>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntConstRight(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntConstRight(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - 32767);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntConstLeft(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<ConsI>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntConstLeft(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntConstLeft(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(32767 - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Conv>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Conv>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons8>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntInvariantRight(short[] s, int val) {
+ int sad = 0;
+ short x = (short) (val + 1);
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(s[i] - x);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Conv>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Conv>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntInvariantLeft(short[] s, int val) {
+ int sad = 0;
+ short x = (short) (val + 1);
+ for (int i = 0; i < s.length; i++) {
+ sad += Math.abs(x - s[i]);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Conv>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Load>>,<<Add>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntCastedExprRight(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ short x = (short) (s[i] + 110); // narrower part sign extends
+ sad += Math.abs(s[i] - x);
+ }
+ return sad;
+ }
+
+ /// CHECK-START: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Conv>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Cons8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<ConsI>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Cons0>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add:d\d+>> VecAdd [<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<SAD:d\d+>> VecSADAccumulate [<<Phi2>>,<<Add>>,<<Load>>] loop:<<Loop>> outer_loop:none
+ private static int sadShort2IntCastedExprLeft(short[] s) {
+ int sad = 0;
+ for (int i = 0; i < s.length; i++) {
+ short x = (short) (s[i] + 110); // narrower part sign extends
+ sad += Math.abs(x - s[i]);
+ }
+ return sad;
+ }
+
+ public static void main(String[] args) {
+ short[] interesting = {
+ (short) 0x0000,
+ (short) 0x0001,
+ (short) 0x0002,
+ (short) 0x0003,
+ (short) 0x0004,
+ (short) 0x1234,
+ (short) 0x8000,
+ (short) 0x8001,
+ (short) 0x8002,
+ (short) 0x8003,
+ (short) 0x8004,
+ (short) 0x8004,
+ (short) 0x7000,
+ (short) 0x7fff,
+ (short) 0xf000,
+ (short) 0xffff
+ };
+ short[] s = new short[64];
+ for (int i = 0; i < 64; i++) {
+ s[i] = interesting[i % interesting.length];
+ }
+
+ expectEquals(1067200, sadShort2IntParamRight(s, (short)-1));
+ expectEquals(1067200, sadShort2IntParamRight(s, (short) 0));
+ expectEquals(1067208, sadShort2IntParamRight(s, (short) 1));
+ expectEquals(1067224, sadShort2IntParamRight(s, (short) 2));
+ expectEquals(2635416, sadShort2IntParamRight(s, (short) 0x7fff));
+ expectEquals(1558824, sadShort2IntParamRight(s, (short) 0x8000));
+
+ expectEquals(1067200, sadShort2IntParamLeft(s, (short)-1));
+ expectEquals(1067200, sadShort2IntParamLeft(s, (short) 0));
+ expectEquals(1067208, sadShort2IntParamLeft(s, (short) 1));
+ expectEquals(1067224, sadShort2IntParamLeft(s, (short) 2));
+ expectEquals(2635416, sadShort2IntParamLeft(s, (short) 0x7fff));
+ expectEquals(1558824, sadShort2IntParamLeft(s, (short) 0x8000));
+
+ expectEquals(2635416, sadShort2IntConstRight(s));
+ expectEquals(2635416, sadShort2IntConstLeft(s));
+
+ expectEquals(1067200, sadShort2IntInvariantRight(s, -2));
+ expectEquals(1067200, sadShort2IntInvariantRight(s, -1));
+ expectEquals(1067208, sadShort2IntInvariantRight(s, 0));
+ expectEquals(1067224, sadShort2IntInvariantRight(s, 1));
+ expectEquals(2635416, sadShort2IntInvariantRight(s, 0x7ffe));
+ expectEquals(1558824, sadShort2IntInvariantRight(s, 0x7fff));
+
+ expectEquals(1067200, sadShort2IntInvariantLeft(s, -2));
+ expectEquals(1067200, sadShort2IntInvariantLeft(s, -1));
+ expectEquals(1067208, sadShort2IntInvariantLeft(s, 0));
+ expectEquals(1067224, sadShort2IntInvariantLeft(s, 1));
+ expectEquals(2635416, sadShort2IntInvariantLeft(s, 0x7ffe));
+ expectEquals(1558824, sadShort2IntInvariantLeft(s, 0x7fff));
+
+ expectEquals(268304, sadShort2IntCastedExprLeft(s));
+ expectEquals(268304, sadShort2IntCastedExprRight(s));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/661-oat-writer-layout/expected.txt b/test/661-oat-writer-layout/expected.txt
index db28e4f..b7ad70a 100644
--- a/test/661-oat-writer-layout/expected.txt
+++ b/test/661-oat-writer-layout/expected.txt
@@ -8,15 +8,6 @@
C::m_a$$$
C::m_b$$$
C::m_c$$$
-A::m_a$Hot$$
-A::m_b$Hot$$
-A::m_c$Hot$$
-B::m_a$Hot$$
-B::m_b$Hot$$
-B::m_c$Hot$$
-C::m_a$Hot$$
-C::m_b$Hot$$
-C::m_c$Hot$$
A::m_a$$Startup$
A::m_b$$Startup$
A::m_c$$Startup$
diff --git a/test/661-oat-writer-layout/profile b/test/661-oat-writer-layout/profile
index 5406484..cf307c2 100644
--- a/test/661-oat-writer-layout/profile
+++ b/test/661-oat-writer-layout/profile
@@ -1,60 +1,51 @@
-HLA;->m_a$Hot$$()V
SLA;->m_a$$Startup$()V
HSLA;->m_a$Hot$Startup$()V
PLA;->m_a$$$Poststartup()V
HPLA;->m_a$Hot$$Poststartup()V
SPLA;->m_a$$Startup$Poststartup()V
HSPLA;->m_a$Hot$Startup$Poststartup()V
-HLA;->m_b$Hot$$()V
SLA;->m_b$$Startup$()V
HSLA;->m_b$Hot$Startup$()V
PLA;->m_b$$$Poststartup()V
HPLA;->m_b$Hot$$Poststartup()V
SPLA;->m_b$$Startup$Poststartup()V
HSPLA;->m_b$Hot$Startup$Poststartup()V
-HLA;->m_c$Hot$$()V
SLA;->m_c$$Startup$()V
HSLA;->m_c$Hot$Startup$()V
PLA;->m_c$$$Poststartup()V
HPLA;->m_c$Hot$$Poststartup()V
SPLA;->m_c$$Startup$Poststartup()V
HSPLA;->m_c$Hot$Startup$Poststartup()V
-HLB;->m_a$Hot$$()V
SLB;->m_a$$Startup$()V
HSLB;->m_a$Hot$Startup$()V
PLB;->m_a$$$Poststartup()V
HPLB;->m_a$Hot$$Poststartup()V
SPLB;->m_a$$Startup$Poststartup()V
HSPLB;->m_a$Hot$Startup$Poststartup()V
-HLB;->m_b$Hot$$()V
SLB;->m_b$$Startup$()V
HSLB;->m_b$Hot$Startup$()V
PLB;->m_b$$$Poststartup()V
HPLB;->m_b$Hot$$Poststartup()V
SPLB;->m_b$$Startup$Poststartup()V
HSPLB;->m_b$Hot$Startup$Poststartup()V
-HLB;->m_c$Hot$$()V
SLB;->m_c$$Startup$()V
HSLB;->m_c$Hot$Startup$()V
PLB;->m_c$$$Poststartup()V
HPLB;->m_c$Hot$$Poststartup()V
SPLB;->m_c$$Startup$Poststartup()V
HSPLB;->m_c$Hot$Startup$Poststartup()V
-HLC;->m_a$Hot$$()V
SLC;->m_a$$Startup$()V
HSLC;->m_a$Hot$Startup$()V
PLC;->m_a$$$Poststartup()V
HPLC;->m_a$Hot$$Poststartup()V
SPLC;->m_a$$Startup$Poststartup()V
HSPLC;->m_a$Hot$Startup$Poststartup()V
-HLC;->m_b$Hot$$()V
SLC;->m_b$$Startup$()V
HSLC;->m_b$Hot$Startup$()V
PLC;->m_b$$$Poststartup()V
HPLC;->m_b$Hot$$Poststartup()V
SPLC;->m_b$$Startup$Poststartup()V
HSPLC;->m_b$Hot$Startup$Poststartup()V
-HLC;->m_c$Hot$$()V
SLC;->m_c$$Startup$()V
HSLC;->m_c$Hot$Startup$()V
PLC;->m_c$$$Poststartup()V
diff --git a/test/661-oat-writer-layout/src/Test.java b/test/661-oat-writer-layout/src/Test.java
index db67b48..f862e37 100644
--- a/test/661-oat-writer-layout/src/Test.java
+++ b/test/661-oat-writer-layout/src/Test.java
@@ -13,86 +13,78 @@
// limitations under the License.
import java.lang.reflect.Method;
+import java.util.ArrayList;
public class Test {
// Returns list of all methods in Generated.java
// This is to avoid having to introspect classes with extra code
// (for example, we ignore <init> methods).
public static Method[] getTestMethods() throws NoSuchMethodException, SecurityException {
- Method[] all_methods = new Method[72];
- all_methods[0] = A.class.getDeclaredMethod("m_a$$$");
- all_methods[1] = A.class.getDeclaredMethod("m_a$Hot$$");
- all_methods[2] = A.class.getDeclaredMethod("m_a$$Startup$");
- all_methods[3] = A.class.getDeclaredMethod("m_a$Hot$Startup$");
- all_methods[4] = A.class.getDeclaredMethod("m_a$$$Poststartup");
- all_methods[5] = A.class.getDeclaredMethod("m_a$Hot$$Poststartup");
- all_methods[6] = A.class.getDeclaredMethod("m_a$$Startup$Poststartup");
- all_methods[7] = A.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup");
- all_methods[8] = A.class.getDeclaredMethod("m_b$$$");
- all_methods[9] = A.class.getDeclaredMethod("m_b$Hot$$");
- all_methods[10] = A.class.getDeclaredMethod("m_b$$Startup$");
- all_methods[11] = A.class.getDeclaredMethod("m_b$Hot$Startup$");
- all_methods[12] = A.class.getDeclaredMethod("m_b$$$Poststartup");
- all_methods[13] = A.class.getDeclaredMethod("m_b$Hot$$Poststartup");
- all_methods[14] = A.class.getDeclaredMethod("m_b$$Startup$Poststartup");
- all_methods[15] = A.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup");
- all_methods[16] = A.class.getDeclaredMethod("m_c$$$");
- all_methods[17] = A.class.getDeclaredMethod("m_c$Hot$$");
- all_methods[18] = A.class.getDeclaredMethod("m_c$$Startup$");
- all_methods[19] = A.class.getDeclaredMethod("m_c$Hot$Startup$");
- all_methods[20] = A.class.getDeclaredMethod("m_c$$$Poststartup");
- all_methods[21] = A.class.getDeclaredMethod("m_c$Hot$$Poststartup");
- all_methods[22] = A.class.getDeclaredMethod("m_c$$Startup$Poststartup");
- all_methods[23] = A.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup");
- all_methods[24] = B.class.getDeclaredMethod("m_a$$$");
- all_methods[25] = B.class.getDeclaredMethod("m_a$Hot$$");
- all_methods[26] = B.class.getDeclaredMethod("m_a$$Startup$");
- all_methods[27] = B.class.getDeclaredMethod("m_a$Hot$Startup$");
- all_methods[28] = B.class.getDeclaredMethod("m_a$$$Poststartup");
- all_methods[29] = B.class.getDeclaredMethod("m_a$Hot$$Poststartup");
- all_methods[30] = B.class.getDeclaredMethod("m_a$$Startup$Poststartup");
- all_methods[31] = B.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup");
- all_methods[32] = B.class.getDeclaredMethod("m_b$$$");
- all_methods[33] = B.class.getDeclaredMethod("m_b$Hot$$");
- all_methods[34] = B.class.getDeclaredMethod("m_b$$Startup$");
- all_methods[35] = B.class.getDeclaredMethod("m_b$Hot$Startup$");
- all_methods[36] = B.class.getDeclaredMethod("m_b$$$Poststartup");
- all_methods[37] = B.class.getDeclaredMethod("m_b$Hot$$Poststartup");
- all_methods[38] = B.class.getDeclaredMethod("m_b$$Startup$Poststartup");
- all_methods[39] = B.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup");
- all_methods[40] = B.class.getDeclaredMethod("m_c$$$");
- all_methods[41] = B.class.getDeclaredMethod("m_c$Hot$$");
- all_methods[42] = B.class.getDeclaredMethod("m_c$$Startup$");
- all_methods[43] = B.class.getDeclaredMethod("m_c$Hot$Startup$");
- all_methods[44] = B.class.getDeclaredMethod("m_c$$$Poststartup");
- all_methods[45] = B.class.getDeclaredMethod("m_c$Hot$$Poststartup");
- all_methods[46] = B.class.getDeclaredMethod("m_c$$Startup$Poststartup");
- all_methods[47] = B.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup");
- all_methods[48] = C.class.getDeclaredMethod("m_a$$$");
- all_methods[49] = C.class.getDeclaredMethod("m_a$Hot$$");
- all_methods[50] = C.class.getDeclaredMethod("m_a$$Startup$");
- all_methods[51] = C.class.getDeclaredMethod("m_a$Hot$Startup$");
- all_methods[52] = C.class.getDeclaredMethod("m_a$$$Poststartup");
- all_methods[53] = C.class.getDeclaredMethod("m_a$Hot$$Poststartup");
- all_methods[54] = C.class.getDeclaredMethod("m_a$$Startup$Poststartup");
- all_methods[55] = C.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup");
- all_methods[56] = C.class.getDeclaredMethod("m_b$$$");
- all_methods[57] = C.class.getDeclaredMethod("m_b$Hot$$");
- all_methods[58] = C.class.getDeclaredMethod("m_b$$Startup$");
- all_methods[59] = C.class.getDeclaredMethod("m_b$Hot$Startup$");
- all_methods[60] = C.class.getDeclaredMethod("m_b$$$Poststartup");
- all_methods[61] = C.class.getDeclaredMethod("m_b$Hot$$Poststartup");
- all_methods[62] = C.class.getDeclaredMethod("m_b$$Startup$Poststartup");
- all_methods[63] = C.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup");
- all_methods[64] = C.class.getDeclaredMethod("m_c$$$");
- all_methods[65] = C.class.getDeclaredMethod("m_c$Hot$$");
- all_methods[66] = C.class.getDeclaredMethod("m_c$$Startup$");
- all_methods[67] = C.class.getDeclaredMethod("m_c$Hot$Startup$");
- all_methods[68] = C.class.getDeclaredMethod("m_c$$$Poststartup");
- all_methods[69] = C.class.getDeclaredMethod("m_c$Hot$$Poststartup");
- all_methods[70] = C.class.getDeclaredMethod("m_c$$Startup$Poststartup");
- all_methods[71] = C.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup");
- return all_methods;
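+ // A growable list avoids the manual index bookkeeping of the fixed-size array
+ // above, which had to be renumbered whenever methods (here: the Hot-only ones)
+ // were added or removed.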
+ ArrayList<Method> all_methods = new ArrayList<Method>();
+ all_methods.add(A.class.getDeclaredMethod("m_a$$$"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$Hot$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$Hot$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$$Startup$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$$$"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$Hot$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$Hot$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$$Startup$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$$$"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$Hot$Startup$"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$Hot$$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$$Startup$Poststartup"));
+ all_methods.add(A.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$$$"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$Hot$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$Hot$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$$$"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$Hot$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$Hot$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$$$"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$Hot$Startup$"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$Hot$$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$$Startup$Poststartup"));
+ all_methods.add(B.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$$$"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$Hot$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$Hot$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_a$Hot$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$$$"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$Hot$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$Hot$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_b$Hot$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$$$"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$Hot$Startup$"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$Hot$$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$$Startup$Poststartup"));
+ all_methods.add(C.class.getDeclaredMethod("m_c$Hot$Startup$Poststartup"));
+ return all_methods.toArray(new Method[all_methods.size()]);
}
}
diff --git a/test/707-checker-invalid-profile/profile b/test/707-checker-invalid-profile/profile
index 5979dd2..f142c40 100644
--- a/test/707-checker-invalid-profile/profile
+++ b/test/707-checker-invalid-profile/profile
@@ -1,4 +1,4 @@
-LMain;->attemptInlineMonomorphic(LMain;)I+invalid_class
-LMain;->attemptInlinePolymorphic(LMain;)I+LMain;,invalid_class
-LMain;->invalid_method
+SHLMain;->attemptInlineMonomorphic(LMain;)I+invalid_class
+SHLMain;->attemptInlinePolymorphic(LMain;)I+LMain;,invalid_class
+SHLMain;->invalid_method
invalid_class
\ No newline at end of file
diff --git a/test/709-checker-varhandles/build b/test/709-checker-varhandles/build
new file mode 100755
index 0000000..2b0b2c1
--- /dev/null
+++ b/test/709-checker-varhandles/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
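+# Build with the method-handles experimental language features enabled so the
+# java.lang.invoke surface used by VarHandle is available to the test.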
+./default-build "$@" --experimental method-handles
diff --git a/test/709-checker-varhandles/expected.txt b/test/709-checker-varhandles/expected.txt
new file mode 100644
index 0000000..651da72
--- /dev/null
+++ b/test/709-checker-varhandles/expected.txt
@@ -0,0 +1,2 @@
+starting
+passed
diff --git a/test/709-checker-varhandles/info.txt b/test/709-checker-varhandles/info.txt
new file mode 100644
index 0000000..2221240
--- /dev/null
+++ b/test/709-checker-varhandles/info.txt
@@ -0,0 +1 @@
+Test support for intrinsics in the Java 9 class java.lang.invoke.VarHandle.
diff --git a/test/709-checker-varhandles/src-art/Main.java b/test/709-checker-varhandles/src-art/Main.java
new file mode 100644
index 0000000..46aaa38
--- /dev/null
+++ b/test/709-checker-varhandles/src-art/Main.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.invoke.VarHandle;
+
+/**
+ * Checker test on the Java 9 VarHandle fence operations. Note, this is by no
+ * means an exhaustive unit test for these fence operations. Instead, this test
+ * ensures the methods are recognized as intrinsic and behave as expected.
+ */
+public class Main {
+
+ //
+ // Fences (native).
+ //
+
+ /// CHECK-START: void Main.fullFence() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleFullFence
+ //
+ /// CHECK-START: void Main.fullFence() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect intrinsic:VarHandleFullFence
+ //
+ /// CHECK-START: void Main.fullFence() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:AnyAny
+ private static void fullFence() {
+ VarHandle.fullFence();
+ }
+
+ /// CHECK-START: void Main.acquireFence() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleAcquireFence
+ //
+ /// CHECK-START: void Main.acquireFence() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect intrinsic:VarHandleAcquireFence
+ //
+ /// CHECK-START: void Main.acquireFence() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:LoadAny
+ private static void acquireFence() {
+ VarHandle.acquireFence();
+ }
+
+ /// CHECK-START: void Main.releaseFence() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleReleaseFence
+ //
+ /// CHECK-START: void Main.releaseFence() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect intrinsic:VarHandleReleaseFence
+ //
+ /// CHECK-START: void Main.releaseFence() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:AnyStore
+ private static void releaseFence() {
+ VarHandle.releaseFence();
+ }
+
+ /// CHECK-START: void Main.loadLoadFence() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleLoadLoadFence
+ //
+ /// CHECK-START: void Main.loadLoadFence() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect intrinsic:VarHandleLoadLoadFence
+ //
+ /// CHECK-START: void Main.loadLoadFence() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:LoadAny
+ private static void loadLoadFence() {
+ VarHandle.loadLoadFence();
+ }
+
+ /// CHECK-START: void Main.storeStoreFence() intrinsics_recognition (after)
+ /// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleStoreStoreFence
+ //
+ /// CHECK-START: void Main.storeStoreFence() instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect intrinsic:VarHandleStoreStoreFence
+ //
+ /// CHECK-START: void Main.storeStoreFence() instruction_simplifier (after)
+ /// CHECK-DAG: MemoryBarrier kind:StoreStore
+ private static void storeStoreFence() {
+ VarHandle.storeStoreFence();
+ }
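+
+ // Recap of the lowerings asserted above: after instruction_simplifier each
+ // recognized fence intrinsic is gone, replaced by a MemoryBarrier of the kind
+ // named in its CHECK block (fullFence -> AnyAny, acquireFence and
+ // loadLoadFence -> LoadAny, releaseFence -> AnyStore, storeStoreFence -> StoreStore).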
+
+ //
+ // Driver.
+ //
+
+ public static void main(String[] args) {
+ System.out.println("starting");
+ acquireFence();
+ releaseFence();
+ loadLoadFence();
+ storeStoreFence();
+ fullFence();
+ System.out.println("passed");
+ }
+}
diff --git a/test/910-methods/build b/test/910-methods/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/910-methods/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/910-methods/check b/test/910-methods/check
index 8358500..e6f7d77 100644
--- a/test/910-methods/check
+++ b/test/910-methods/check
@@ -19,4 +19,8 @@
patch -p0 expected.txt < expected_jack.diff
fi
+if [[ "$USE_D8" == true ]]; then
+ patch -p0 expected.txt < expected_d8.diff
+fi
+
./default-check "$@"
diff --git a/test/910-methods/expected_d8.diff b/test/910-methods/expected_d8.diff
new file mode 100644
index 0000000..2c5d085
--- /dev/null
+++ b/test/910-methods/expected_d8.diff
@@ -0,0 +1,4 @@
+7c7
+< Location end: 39
+---
+> Location end: 36
diff --git a/test/911-get-stack-trace/build b/test/911-get-stack-trace/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/911-get-stack-trace/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/913-heaps/build b/test/913-heaps/build
new file mode 100644
index 0000000..10ffcc5
--- /dev/null
+++ b/test/913-heaps/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See b/65168732
+export USE_D8=false
+
+./default-build "$@"
diff --git a/test/979-const-method-handle/build b/test/979-const-method-handle/build
new file mode 100644
index 0000000..495557e
--- /dev/null
+++ b/test/979-const-method-handle/build
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
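+# --min-sdk-version=28 makes dx emit DEX version 039, the first version with the
+# const-method-handle and const-method-type bytecodes exercised by this test.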
+${DX} --dex --min-sdk-version=28 --output=classes.dex classes
+
+zip $TEST_NAME.jar classes.dex
diff --git a/test/979-const-method-handle/classes/Main.class b/test/979-const-method-handle/classes/Main.class
new file mode 100644
index 0000000..8d6b7d8
--- /dev/null
+++ b/test/979-const-method-handle/classes/Main.class
Binary files differ
diff --git a/test/979-const-method-handle/classes/constmethodhandle/ConstTest.class b/test/979-const-method-handle/classes/constmethodhandle/ConstTest.class
new file mode 100644
index 0000000..a21b0a3
--- /dev/null
+++ b/test/979-const-method-handle/classes/constmethodhandle/ConstTest.class
Binary files differ
diff --git a/test/979-const-method-handle/expected.txt b/test/979-const-method-handle/expected.txt
new file mode 100644
index 0000000..573b80d
--- /dev/null
+++ b/test/979-const-method-handle/expected.txt
@@ -0,0 +1,2 @@
+MethodHandle MethodHandle(Object)Class => class java.lang.Float
+MethodType (char,short,int,long,float,double,Object)boolean
diff --git a/test/979-const-method-handle/info.txt b/test/979-const-method-handle/info.txt
new file mode 100644
index 0000000..e8514ce
--- /dev/null
+++ b/test/979-const-method-handle/info.txt
@@ -0,0 +1,7 @@
+This test checks the const-method-handle and const-method-type bytecodes.
+
+The class files in this test come from:
+
+ dalvik/dx/tests/142-const-method-handle
+
+and are built using the ASM bytecode manipulation library.
diff --git a/test/988-method-trace/gen_srcs.py b/test/988-method-trace/gen_srcs.py
index c1ce35c..8f1082f 100755
--- a/test/988-method-trace/gen_srcs.py
+++ b/test/988-method-trace/gen_srcs.py
@@ -38,7 +38,8 @@
IDX_CLASS_NAME = -3
# Exclude all hidden API.
-KLASS_BLACK_LIST = ['sun.misc.Unsafe', 'libcore.io.Memory', 'java.lang.StringFactory']
+KLASS_BLACK_LIST = ['sun.misc.Unsafe', 'libcore.io.Memory', 'java.lang.StringFactory',
+ 'java.lang.invoke.VarHandle' ] # TODO(b/65872996): Enable when VarHandle is visible.
METHOD_BLACK_LIST = [('java.lang.ref.Reference', 'getReferent'),
('java.lang.String', 'getCharsNoCheck'),
('java.lang.System', 'arraycopy')] # arraycopy has a manual test.
diff --git a/test/988-method-trace/src/art/Trace.java b/test/988-method-trace/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/988-method-trace/src/art/Trace.java
+++ b/test/988-method-trace/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests,
+ // so we need to add new functions at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
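+
+ // Hypothetical usage sketch (the class and method variables below are
+ // illustrative, not part of this change): pass null for callbacks that are
+ // not needed, e.g.
+ //   Trace.enableTracing2(Events.class, entryMethod, exitMethod,
+ //                        null /* fieldAccess */, null /* fieldModify */,
+ //                        null /* singleStep */, threadStartMethod,
+ //                        threadEndMethod, Thread.currentThread());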
}
diff --git a/test/989-method-trace-throw/src/art/Trace.java b/test/989-method-trace-throw/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/989-method-trace-throw/src/art/Trace.java
+++ b/test/989-method-trace-throw/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests,
+ // so we need to add new functions at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/990-field-trace/src/art/Trace.java b/test/990-field-trace/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/990-field-trace/src/art/Trace.java
+++ b/test/990-field-trace/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests,
+ // so we need to add new functions at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/991-field-trace-2/src/art/Trace.java b/test/991-field-trace-2/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/991-field-trace-2/src/art/Trace.java
+++ b/test/991-field-trace-2/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests,
+ // so we need to add new functions at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/997-single-step/src/art/Trace.java b/test/997-single-step/src/art/Trace.java
index ba3d397..8999bb1 100644
--- a/test/997-single-step/src/art/Trace.java
+++ b/test/997-single-step/src/art/Trace.java
@@ -53,4 +53,16 @@
public static native void watchFieldModification(Field f);
public static native void watchAllFieldAccesses();
public static native void watchAllFieldModifications();
+
+ // The names, arguments, and even line numbers of these functions are embedded in the tests,
+ // so we need to add new functions at the bottom and not modify old ones to maintain compatibility.
+ public static native void enableTracing2(Class<?> methodClass,
+ Method entryMethod,
+ Method exitMethod,
+ Method fieldAccess,
+ Method fieldModify,
+ Method singleStep,
+ Method ThreadStart,
+ Method ThreadEnd,
+ Thread thr);
}
diff --git a/test/Android.bp b/test/Android.bp
index 2af03e3..b737345 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -74,7 +74,7 @@
],
target: {
- linux: {
+ linux_glibc: {
ldflags: [
// Allow jni_compiler_test to find Java_MyClassNatives_bar
// within itself using dlopen(NULL, ...).
@@ -143,7 +143,7 @@
"-Wno-missing-noreturn",
],
},
- linux: {
+ linux_glibc: {
cflags: [
// gtest issue
"-Wno-used-but-marked-unused",
@@ -226,6 +226,7 @@
"ti-agent/redefinition_helper.cc",
"ti-agent/suspension_helper.cc",
"ti-agent/stack_trace_helper.cc",
+ "ti-agent/threads_helper.cc",
"ti-agent/trace_helper.cc",
"ti-agent/exceptions_helper.cc",
// This is the list of non-special OnLoad things and excludes BCI and anything that depends
@@ -275,6 +276,8 @@
"1927-exception-event/exception_event.cc",
"1930-monitor-info/monitor.cc",
"1932-monitor-events-misc/monitor_misc.cc",
+ "1934-jvmti-signal-thread/signal_threads.cc",
+ "1939-proxy-frames/local_instance.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 6017d28..4b49142 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -22,8 +22,7 @@
$(HOST_OUT_EXECUTABLES)/dx \
$(HOST_OUT_EXECUTABLES)/jasmin \
$(HOST_OUT_EXECUTABLES)/smali \
- $(HOST_OUT_EXECUTABLES)/dexmerger \
- $(JACK)
+ $(HOST_OUT_EXECUTABLES)/dexmerger
# Converts a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
define name-to-var
@@ -124,19 +123,9 @@
# Host executables.
host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
-# Classpath for Jack compilation for host.
-host_prereq_rules += $(HOST_JACK_CLASSPATH_DEPENDENCIES)
-endif
-
-# Required for dx, jasmin, smali, dexmerger, jack.
+# Required for dx, jasmin, smali, dexmerger.
host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
-# Classpath for Jack compilation for target.
-target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
-endif
-
# Sync test files to the target, depends upon all things that must be pushed
# to the target.
target_prereq_rules += test-art-target-sync
diff --git a/test/dexdump/const-method-handle.dex b/test/dexdump/const-method-handle.dex
new file mode 100644
index 0000000..1fe28e5
--- /dev/null
+++ b/test/dexdump/const-method-handle.dex
Binary files differ
diff --git a/test/dexdump/const-method-handle.lst b/test/dexdump/const-method-handle.lst
new file mode 100644
index 0000000..961d427
--- /dev/null
+++ b/test/dexdump/const-method-handle.lst
@@ -0,0 +1,9 @@
+#const-method-handle.dex
+0x000003c0 8 Main <init> ()V Main.java 22
+0x000003d8 50 Main main ([Ljava/lang/String;)V Main.java 26
+0x0000041c 8 constmethodhandle.ConstTest <init> ()V ConstTest.java 22
+0x00000434 94 constmethodhandle.ConstTest displayMethodHandle (Ljava/lang/invoke/MethodHandle;)V ConstTest.java 24
+0x000004a4 50 constmethodhandle.ConstTest displayMethodType (Ljava/lang/invoke/MethodType;)V ConstTest.java 29
+0x000004e8 30 constmethodhandle.ConstTest main ([Ljava/lang/String;)V ConstTest.java -1
+0x00000518 6 constmethodhandle.ConstTest test1 ()Ljava/lang/invoke/MethodHandle; ConstTest.java -1
+0x00000530 6 constmethodhandle.ConstTest test2 ()Ljava/lang/invoke/MethodType; ConstTest.java -1
diff --git a/test/dexdump/const-method-handle.txt b/test/dexdump/const-method-handle.txt
new file mode 100644
index 0000000..6b33502
--- /dev/null
+++ b/test/dexdump/const-method-handle.txt
@@ -0,0 +1,275 @@
+Processing 'const-method-handle.dex'...
+Opened 'const-method-handle.dex', DEX version '039'
+DEX file header:
+magic : 'dex\n039\0'
+checksum : 16656a27
+signature : 1953...5aa5
+file_size : 2524
+header_size : 112
+link_size : 0
+link_off : 0 (0x000000)
+string_ids_size : 57
+string_ids_off : 112 (0x000070)
+type_ids_size : 26
+type_ids_off : 340 (0x000154)
+proto_ids_size : 18
+proto_ids_off : 444 (0x0001bc)
+field_ids_size : 2
+field_ids_off : 660 (0x000294)
+method_ids_size : 23
+method_ids_off : 676 (0x0002a4)
+class_defs_size : 2
+class_defs_off : 860 (0x00035c)
+data_size : 1588
+data_off : 936 (0x0003a8)
+
+Class #0 header:
+class_idx : 5
+access_flags : 1 (0x0001)
+superclass_idx : 11
+interfaces_off : 0 (0x000000)
+source_file_idx : 29
+annotations_off : 1336 (0x000538)
+class_data_off : 2270 (0x0008de)
+static_fields_size : 0
+instance_fields_size: 0
+direct_methods_size : 2
+virtual_methods_size: 0
+
+Class #0 annotations:
+Annotations on method #1 'main'
+ VISIBILITY_SYSTEM Ldalvik/annotation/Throws; value={ Ljava/lang/Throwable; }
+
+Class #0 -
+ Class descriptor : 'LMain;'
+ Access flags : 0x0001 (PUBLIC)
+ Superclass : 'Ljava/lang/Object;'
+ Interfaces -
+ Static fields -
+ Instance fields -
+ Direct methods -
+ #0 : (in LMain;)
+ name : '<init>'
+ type : '()V'
+ access : 0x10001 (PUBLIC CONSTRUCTOR)
+ code -
+ registers : 1
+ ins : 1
+ outs : 1
+ insns size : 4 16-bit code units
+0003b0: |[0003b0] Main.<init>:()V
+0003c0: 7010 0a00 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@000a
+0003c6: 0e00 |0003: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=22
+ locals :
+ 0x0000 - 0x0004 reg=0 this LMain;
+
+ #1 : (in LMain;)
+ name : 'main'
+ type : '([Ljava/lang/String;)V'
+ access : 0x0009 (PUBLIC STATIC)
+ code -
+ registers : 6
+ ins : 1
+ outs : 4
+ insns size : 25 16-bit code units
+0003c8: |[0003c8] Main.main:([Ljava/lang/String;)V
+0003d8: 7100 1500 0000 |0000: invoke-static {}, Ljava/lang/invoke/MethodHandles;.lookup:()Ljava/lang/invoke/MethodHandles$Lookup; // method@0015
+0003de: 0c00 |0003: move-result-object v0
+0003e0: 1c01 0600 |0004: const-class v1, Lconstmethodhandle/ConstTest; // type@0006
+0003e4: 1a02 3000 |0006: const-string v2, "main" // string@0030
+0003e8: 6203 0100 |0008: sget-object v3, Ljava/lang/Void;.TYPE:Ljava/lang/Class; // field@0001
+0003ec: 1c04 1900 |000a: const-class v4, [Ljava/lang/String; // type@0019
+0003f0: 7120 1600 4300 |000c: invoke-static {v3, v4}, Ljava/lang/invoke/MethodType;.methodType:(Ljava/lang/Class;Ljava/lang/Class;)Ljava/lang/invoke/MethodType; // method@0016
+0003f6: 0c03 |000f: move-result-object v3
+0003f8: 6e40 1400 1032 |0010: invoke-virtual {v0, v1, v2, v3}, Ljava/lang/invoke/MethodHandles$Lookup;.findStatic:(Ljava/lang/Class;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle; // method@0014
+0003fe: 0c00 |0013: move-result-object v0
+000400: fa20 1200 5000 1000 |0014: invoke-polymorphic {v0, v5}, Ljava/lang/invoke/MethodHandle;.invokeExact:([Ljava/lang/Object;)Ljava/lang/Object;, ([Ljava/lang/String;)V // method@0012, proto@0010
+000408: 0e00 |0018: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=26
+ 0x000c line=27
+ 0x0014 line=28
+ 0x0018 line=29
+ locals :
+ 0x0000 - 0x0019 reg=5 (null) [Ljava/lang/String;
+
+ Virtual methods -
+ source_file_idx : 29 (Main.java)
+
+Class #1 header:
+class_idx : 6
+access_flags : 1 (0x0001)
+superclass_idx : 11
+interfaces_off : 0 (0x000000)
+source_file_idx : 3
+annotations_off : 1360 (0x000550)
+class_data_off : 2284 (0x0008ec)
+static_fields_size : 0
+instance_fields_size: 0
+direct_methods_size : 6
+virtual_methods_size: 0
+
+Class #1 annotations:
+Annotations on method #3 'displayMethodHandle'
+ VISIBILITY_SYSTEM Ldalvik/annotation/Throws; value={ Ljava/lang/Throwable; }
+
+Class #1 -
+ Class descriptor : 'Lconstmethodhandle/ConstTest;'
+ Access flags : 0x0001 (PUBLIC)
+ Superclass : 'Ljava/lang/Object;'
+ Interfaces -
+ Static fields -
+ Instance fields -
+ Direct methods -
+ #0 : (in Lconstmethodhandle/ConstTest;)
+ name : '<init>'
+ type : '()V'
+ access : 0x10001 (PUBLIC CONSTRUCTOR)
+ code -
+ registers : 1
+ ins : 1
+ outs : 1
+ insns size : 4 16-bit code units
+00040c: |[00040c] constmethodhandle.ConstTest.<init>:()V
+00041c: 7010 0a00 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@000a
+000422: 0e00 |0003: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=22
+ locals :
+ 0x0000 - 0x0004 reg=0 this Lconstmethodhandle/ConstTest;
+
+ #1 : (in Lconstmethodhandle/ConstTest;)
+ name : 'displayMethodHandle'
+ type : '(Ljava/lang/invoke/MethodHandle;)V'
+ access : 0x000a (PRIVATE STATIC)
+ code -
+ registers : 4
+ ins : 1
+ outs : 2
+ insns size : 47 16-bit code units
+000424: |[000424] constmethodhandle.ConstTest.displayMethodHandle:(Ljava/lang/invoke/MethodHandle;)V
+000434: 6200 0000 |0000: sget-object v0, Ljava/lang/System;.out:Ljava/io/PrintStream; // field@0000
+000438: 2201 0d00 |0002: new-instance v1, Ljava/lang/StringBuilder; // type@000d
+00043c: 7010 0c00 0100 |0004: invoke-direct {v1}, Ljava/lang/StringBuilder;.<init>:()V // method@000c
+000442: 1a02 1e00 |0007: const-string v2, "MethodHandle " // string@001e
+000446: 6e20 0e00 2100 |0009: invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@000e
+00044c: 0c01 |000c: move-result-object v1
+00044e: 6e20 0d00 3100 |000d: invoke-virtual {v1, v3}, Ljava/lang/StringBuilder;.append:(Ljava/lang/Object;)Ljava/lang/StringBuilder; // method@000d
+000454: 0c01 |0010: move-result-object v1
+000456: 1a02 0000 |0011: const-string v2, " => " // string@0000
+00045a: 6e20 0e00 2100 |0013: invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@000e
+000460: 0c01 |0016: move-result-object v1
+000462: 1402 0030 4046 |0017: const v2, #float 12300 // #46403000
+000468: 7110 0900 0200 |001a: invoke-static {v2}, Ljava/lang/Float;.valueOf:(F)Ljava/lang/Float; // method@0009
+00046e: 0c02 |001d: move-result-object v2
+000470: fa20 1100 2300 0100 |001e: invoke-polymorphic {v3, v2}, Ljava/lang/invoke/MethodHandle;.invoke:([Ljava/lang/Object;)Ljava/lang/Object;, (Ljava/lang/Object;)Ljava/lang/Class; // method@0011, proto@0001
+000478: 0c02 |0022: move-result-object v2
+00047a: 6e20 0d00 2100 |0023: invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;.append:(Ljava/lang/Object;)Ljava/lang/StringBuilder; // method@000d
+000480: 0c01 |0026: move-result-object v1
+000482: 6e10 0f00 0100 |0027: invoke-virtual {v1}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@000f
+000488: 0c01 |002a: move-result-object v1
+00048a: 6e20 0800 1000 |002b: invoke-virtual {v0, v1}, Ljava/io/PrintStream;.println:(Ljava/lang/String;)V // method@0008
+000490: 0e00 |002e: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=24
+ 0x001a line=25
+ 0x002b line=24
+ 0x002e line=26
+ locals :
+ 0x0000 - 0x002f reg=3 (null) Ljava/lang/invoke/MethodHandle;
+
+ #2 : (in Lconstmethodhandle/ConstTest;)
+ name : 'displayMethodType'
+ type : '(Ljava/lang/invoke/MethodType;)V'
+ access : 0x000a (PRIVATE STATIC)
+ code -
+ registers : 4
+ ins : 1
+ outs : 2
+ insns size : 25 16-bit code units
+000494: |[000494] constmethodhandle.ConstTest.displayMethodType:(Ljava/lang/invoke/MethodType;)V
+0004a4: 6200 0000 |0000: sget-object v0, Ljava/lang/System;.out:Ljava/io/PrintStream; // field@0000
+0004a8: 2201 0d00 |0002: new-instance v1, Ljava/lang/StringBuilder; // type@000d
+0004ac: 7010 0c00 0100 |0004: invoke-direct {v1}, Ljava/lang/StringBuilder;.<init>:()V // method@000c
+0004b2: 1a02 1f00 |0007: const-string v2, "MethodType " // string@001f
+0004b6: 6e20 0e00 2100 |0009: invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@000e
+0004bc: 0c01 |000c: move-result-object v1
+0004be: 6e20 0d00 3100 |000d: invoke-virtual {v1, v3}, Ljava/lang/StringBuilder;.append:(Ljava/lang/Object;)Ljava/lang/StringBuilder; // method@000d
+0004c4: 0c01 |0010: move-result-object v1
+0004c6: 6e10 0f00 0100 |0011: invoke-virtual {v1}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@000f
+0004cc: 0c01 |0014: move-result-object v1
+0004ce: 6e20 0800 1000 |0015: invoke-virtual {v0, v1}, Ljava/io/PrintStream;.println:(Ljava/lang/String;)V // method@0008
+0004d4: 0e00 |0018: return-void
+ catches : (none)
+ positions :
+ 0x0000 line=29
+ 0x0018 line=30
+ locals :
+ 0x0000 - 0x0019 reg=3 (null) Ljava/lang/invoke/MethodType;
+
+ #3 : (in Lconstmethodhandle/ConstTest;)
+ name : 'main'
+ type : '([Ljava/lang/String;)V'
+ access : 0x0009 (PUBLIC STATIC)
+ code -
+ registers : 2
+ ins : 1
+ outs : 1
+ insns size : 15 16-bit code units
+0004d8: |[0004d8] constmethodhandle.ConstTest.main:([Ljava/lang/String;)V
+0004e8: 7100 0600 0000 |0000: invoke-static {}, Lconstmethodhandle/ConstTest;.test1:()Ljava/lang/invoke/MethodHandle; // method@0006
+0004ee: 0c00 |0003: move-result-object v0
+0004f0: 7110 0300 0000 |0004: invoke-static {v0}, Lconstmethodhandle/ConstTest;.displayMethodHandle:(Ljava/lang/invoke/MethodHandle;)V // method@0003
+0004f6: 7100 0700 0000 |0007: invoke-static {}, Lconstmethodhandle/ConstTest;.test2:()Ljava/lang/invoke/MethodType; // method@0007
+0004fc: 0c00 |000a: move-result-object v0
+0004fe: 7110 0400 0000 |000b: invoke-static {v0}, Lconstmethodhandle/ConstTest;.displayMethodType:(Ljava/lang/invoke/MethodType;)V // method@0004
+000504: 0e00 |000e: return-void
+ catches : (none)
+ positions :
+ locals :
+
+ #4 : (in Lconstmethodhandle/ConstTest;)
+ name : 'test1'
+ type : '()Ljava/lang/invoke/MethodHandle;'
+ access : 0x0009 (PUBLIC STATIC)
+ code -
+ registers : 1
+ ins : 0
+ outs : 0
+ insns size : 3 16-bit code units
+000508: |[000508] constmethodhandle.ConstTest.test1:()Ljava/lang/invoke/MethodHandle;
+000518: fe00 0000 |0000: const-method-handle v0, method_handle@0000
+00051c: 1100 |0002: return-object v0
+ catches : (none)
+ positions :
+ locals :
+
+ #5 : (in Lconstmethodhandle/ConstTest;)
+ name : 'test2'
+ type : '()Ljava/lang/invoke/MethodType;'
+ access : 0x0009 (PUBLIC STATIC)
+ code -
+ registers : 1
+ ins : 0
+ outs : 0
+ insns size : 3 16-bit code units
+000520: |[000520] constmethodhandle.ConstTest.test2:()Ljava/lang/invoke/MethodType;
+000530: ff00 1100 |0000: const-method-type v0, (CSIJFDLjava/lang/Object;)Z // proto@0011
+000534: 1100 |0002: return-object v0
+ catches : (none)
+ positions :
+ locals :
+
+ Virtual methods -
+ source_file_idx : 3 (ConstTest.java)
+
+Method handle #0:
+ type : invoke-instance
+ target : Ljava/lang/Object; getClass
+ target_type : (Ljava/lang/Object;)Ljava/lang/Class;
diff --git a/test/dexdump/const-method-handle.xml b/test/dexdump/const-method-handle.xml
new file mode 100644
index 0000000..f1cf9f8
--- /dev/null
+++ b/test/dexdump/const-method-handle.xml
@@ -0,0 +1,91 @@
+<api>
+<package name=""
+>
+<class name="Main"
+ extends="java.lang.Object"
+ interface="false"
+ abstract="false"
+ static="false"
+ final="false"
+ visibility="public"
+>
+<constructor name="Main"
+ type="Main"
+ static="false"
+ final="false"
+ visibility="public"
+>
+</constructor>
+<method name="main"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="true"
+ final="false"
+ visibility="public"
+>
+<parameter name="arg0" type="java.lang.String[]">
+</parameter>
+</method>
+</class>
+</package>
+<package name="constmethodhandle"
+>
+<class name="ConstTest"
+ extends="java.lang.Object"
+ interface="false"
+ abstract="false"
+ static="false"
+ final="false"
+ visibility="public"
+>
+<constructor name="ConstTest"
+ type="constmethodhandle.ConstTest"
+ static="false"
+ final="false"
+ visibility="public"
+>
+</constructor>
+<method name="main"
+ return="void"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="true"
+ final="false"
+ visibility="public"
+>
+<parameter name="arg0" type="java.lang.String[]">
+</parameter>
+</method>
+<method name="test1"
+ return="java.lang.invoke.MethodHandle"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="true"
+ final="false"
+ visibility="public"
+>
+</method>
+<method name="test2"
+ return="java.lang.invoke.MethodType"
+ abstract="false"
+ native="false"
+ synchronized="false"
+ static="true"
+ final="false"
+ visibility="public"
+>
+</method>
+</class>
+<method_handle index="0"
+ type="invoke-instance"
+ target_class="Ljava/lang/Object;"
+ target_member="getClass"
+ target_member_type="(Ljava/lang/Object;)Ljava/lang/Class;"
+>
+</method_handle>
+</package>
+</api>
diff --git a/test/etc/default-build b/test/etc/default-build
index bafd415..f14424e 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -292,8 +292,13 @@
dx_input="${name}"
fi
+ local dexer="${DX}"
+ if [ "${USE_D8}" = "true" ]; then
+ dexer="${ANDROID_HOST_OUT}/bin/d8"
+ fi
+
# Make dex file from desugared JAR.
- ${DX} -JXmx256m ${DX_VM_FLAGS} --debug --dex --dump-to=${name}.lst --output=${name}.dex --dump-width=1000 ${DX_FLAGS} "${dx_input}"
+ ${dexer} -JXmx256m ${DX_VM_FLAGS} --debug --dex --dump-to=${name}.lst --output=${name}.dex --dump-width=1000 ${DX_FLAGS} "${dx_input}"
}
# Merge all the dex files in $1..$N into $1. Skip non-existing files, but at least 1 file must exist.
diff --git a/test/knownfailures.json b/test/knownfailures.json
index df24c7d..56a06d6 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1,5 +1,18 @@
[
{
+ "tests": "1939-proxy-frames",
+ "description": ["Test 1939 seems to consistently fail in gcstress on 64 bit with",
+ "a proxy this object having no associated class!"],
+ "variant": "gcstress",
+ "bug": "http://b/67679263"
+ },
+ {
+ "tests": "1934-jvmti-signal-thread",
+ "description": ["Disables 1934-jvmti-signal-thread in tracing configurations"],
+ "variant": "trace | stream",
+ "bug": "http://b/67384421"
+ },
+ {
"tests": "153-reference-stress",
"description": ["Disable 153-reference-stress temporarily until a fix",
"arrives."],
@@ -231,7 +244,7 @@
},
{
"tests": "597-deopt-invoke-stub",
- "variant": "interp-ac | interpreter | optimizing | trace | stream",
+ "variant": "speed-profile | interp-ac | interpreter | optimizing | trace | stream",
"description": ["This test expects JIT compilation and no AOT for",
"testing deoptimizing at quick-to-interpreter bridge."]
},
diff --git a/test/run-test b/test/run-test
index 79f3d1e..d04cd05 100755
--- a/test/run-test
+++ b/test/run-test
@@ -45,6 +45,7 @@
export RUN="${progdir}/etc/run-test-jar"
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
+export USE_D8="false"
export USE_JACK="false"
export USE_DESUGAR="true"
export SMALI_ARGS=""
@@ -339,6 +340,9 @@
elif [ "x$1" = "x--build-only" ]; then
build_only="yes"
shift
+ elif [ "x$1" = "x--build-with-d8" ]; then
+ USE_D8="true"
+ shift
elif [ "x$1" = "x--build-with-javac-dx" ]; then
USE_JACK="false"
shift
@@ -648,6 +652,7 @@
echo " --gdb Run under gdb; incompatible with some tests."
echo " --gdb-arg Pass an option to gdb."
echo " --build-only Build test files only (off by default)."
+ echo " --build-with-d8 Build test files with javac and d8 (off by default)."
echo " --build-with-javac-dx Build test files with javac and dx (off by default)."
echo " --build-with-jack Build test files with jack and jill (on by default)."
echo " --interpreter Enable interpreter only mode (off by default)."
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index cc19afc..b9123de 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -34,7 +34,8 @@
'HOST_PREFER_32_BIT',
'HOST_OUT_EXECUTABLES',
'ANDROID_JAVA_TOOLCHAIN',
- 'ANDROID_COMPILE_WITH_JACK']
+ 'ANDROID_COMPILE_WITH_JACK',
+ 'USE_D8_BY_DEFAULT']
_DUMP_MANY_VARS = None # To be set to a dictionary with above list being the keys,
# and the build variable being the value.
def _dump_many_vars(var_name):
@@ -109,6 +110,9 @@
# Compiling with jack? Possible values in (True, False, 'default')
ANDROID_COMPILE_WITH_JACK = _get_build_var_boolean('ANDROID_COMPILE_WITH_JACK', 'default')
+# Follow the build system's D8 usage.
+USE_D8_BY_DEFAULT = _get_build_var_boolean('USE_D8_BY_DEFAULT', False)
+
# Directory used for temporary test files on the host.
ART_HOST_TEST_DIR = tempfile.mkdtemp(prefix = 'test-art-')
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 49444d4..492b792 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -96,7 +96,9 @@
run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
'art/test/testrunner/testrunner.py')]
run_test_command += target.get('run-test', [])
- run_test_command += ['-j', str(n_threads)]
+ # Let testrunner compute concurrency based on #cpus.
+ # b/65822340
+ # run_test_command += ['-j', str(n_threads)]
run_test_command += ['-b']
run_test_command += ['--host']
run_test_command += ['--verbose']
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index e8b6f1c..6d21442 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -66,6 +66,13 @@
'ART_USE_READ_BARRIER' : 'true'
}
},
+ 'art-pictest' : {
+ 'run-test' : ['--pictest',
+ '--optimizing'],
+ 'env' : {
+ 'ART_USE_READ_BARRIER' : 'true'
+ }
+ },
'art-gcstress-gcverify': {
'run-test': ['--gcstress',
'--gcverify'],
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 2a772ff..ca29d0a 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -237,6 +237,7 @@
n_thread = get_default_threads('target')
else:
n_thread = get_default_threads('host')
+ print_text("Concurrency: " + str(n_thread) + "\n")
global semaphore
semaphore = threading.Semaphore(n_thread)
@@ -424,6 +425,9 @@
elif env.ANDROID_COMPILE_WITH_JACK == False:
options_test += ' --build-with-javac-dx'
+ if env.USE_D8_BY_DEFAULT == True:
+ options_test += ' --build-with-d8'
+
# TODO(http://b/36039166): This is a temporary solution to
# fix build breakages.
options_test = (' --output-path %s') % (
@@ -477,7 +481,7 @@
if test_passed:
print_test_info(test_name, 'PASS')
else:
- failed_tests.append((test_name, script_output))
+ failed_tests.append((test_name, str(command) + "\n" + script_output))
if not env.ART_TEST_KEEP_GOING:
stop_testrunner = True
print_test_info(test_name, 'FAIL', ('%s\n%s') % (
@@ -532,10 +536,17 @@
total_test_count)
if result == 'FAIL' or result == 'TIMEOUT':
- info += ('%s %s %s\n') % (
- progress_info,
- test_name,
- COLOR_ERROR + result + COLOR_NORMAL)
+ if not verbose:
+ info += ('%s %s %s\n') % (
+ progress_info,
+ test_name,
+ COLOR_ERROR + result + COLOR_NORMAL)
+ else:
+ info += ('%s %s %s\n%s\n') % (
+ progress_info,
+ test_name,
+ COLOR_ERROR + result + COLOR_NORMAL,
+ failed_test_info)
else:
result_text = ''
if result == 'PASS':
diff --git a/test/ti-agent/jvmti_helper.cc b/test/ti-agent/jvmti_helper.cc
index 4ca2d5d..bceaa6b 100644
--- a/test/ti-agent/jvmti_helper.cc
+++ b/test/ti-agent/jvmti_helper.cc
@@ -53,7 +53,7 @@
.can_get_monitor_info = 1,
.can_pop_frame = 0,
.can_redefine_classes = 1,
- .can_signal_thread = 0,
+ .can_signal_thread = 1,
.can_get_source_file_name = 1,
.can_get_line_numbers = 1,
.can_get_source_debug_extension = 1,
diff --git a/test/ti-agent/threads_helper.cc b/test/ti-agent/threads_helper.cc
new file mode 100644
index 0000000..f8aafc3
--- /dev/null
+++ b/test/ti-agent/threads_helper.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_helper.h"
+
+#include "jni.h"
+#include "jvmti.h"
+
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace common_threads {
+
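+// Via standard JNI name mangling, these natives bind to native methods
+// interruptThread(Thread) and stopThread(Thread, Object) declared on a
+// test-side art.Threads class (assumed here; its Java declaration is not
+// part of this file).
+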
+extern "C" JNIEXPORT void Java_art_Threads_interruptThread(JNIEnv* env, jclass, jthread thr) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->InterruptThread(thr));
+}
+
+extern "C" JNIEXPORT void Java_art_Threads_stopThread(JNIEnv* env,
+ jclass,
+ jthread thr,
+ jobject exception) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->StopThread(thr, exception));
+}
+
+} // namespace common_threads
+} // namespace art
diff --git a/test/ti-agent/trace_helper.cc b/test/ti-agent/trace_helper.cc
index 1f8ceff..8b74c7c 100644
--- a/test/ti-agent/trace_helper.cc
+++ b/test/ti-agent/trace_helper.cc
@@ -34,11 +34,36 @@
jmethodID field_access;
jmethodID field_modify;
jmethodID single_step;
+ jmethodID thread_start;
+ jmethodID thread_end;
bool in_callback;
bool access_watch_on_load;
bool modify_watch_on_load;
};
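+
+// These callbacks forward JVMTI ThreadStart/ThreadEnd events to the static Java
+// methods the test registered through enableTracing2 (held in TraceData).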
+static void threadStartCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thread) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data->thread_start != nullptr);
+ jnienv->CallStaticVoidMethod(data->test_klass, data->thread_start, thread);
+}
+static void threadEndCB(jvmtiEnv* jvmti,
+ JNIEnv* jnienv,
+ jthread thread) {
+ TraceData* data = nullptr;
+ if (JvmtiErrorToException(jnienv, jvmti,
+ jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data->thread_end != nullptr);
+ jnienv->CallStaticVoidMethod(data->test_klass, data->thread_end, thread);
+}
+
static void singleStepCB(jvmtiEnv* jvmti,
JNIEnv* jnienv,
jthread thread,
@@ -362,7 +387,7 @@
env->DeleteLocalRef(klass);
}
-extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableTracing(
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableTracing2(
JNIEnv* env,
jclass trace ATTRIBUTE_UNUSED,
jclass klass,
@@ -371,6 +396,8 @@
jobject field_access,
jobject field_modify,
jobject single_step,
+ jobject thread_start,
+ jobject thread_end,
jthread thr) {
TraceData* data = nullptr;
if (JvmtiErrorToException(env,
@@ -386,6 +413,8 @@
data->field_access = field_access != nullptr ? env->FromReflectedMethod(field_access) : nullptr;
data->field_modify = field_modify != nullptr ? env->FromReflectedMethod(field_modify) : nullptr;
data->single_step = single_step != nullptr ? env->FromReflectedMethod(single_step) : nullptr;
+ data->thread_start = thread_start != nullptr ? env->FromReflectedMethod(thread_start) : nullptr;
+ data->thread_end = thread_end != nullptr ? env->FromReflectedMethod(thread_end) : nullptr;
data->in_callback = false;
TraceData* old_data = nullptr;
@@ -410,6 +439,8 @@
cb.FieldModification = fieldModificationCB;
cb.ClassPrepare = classPrepareCB;
cb.SingleStep = singleStepCB;
+ cb.ThreadStart = threadStartCB;
+ cb.ThreadEnd = threadEndCB;
if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
return;
}
@@ -453,6 +484,46 @@
thr))) {
return;
}
+ if (thread_start != nullptr &&
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_THREAD_START,
+ thr))) {
+ return;
+ }
+ if (thread_end != nullptr &&
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_THREAD_END,
+ thr))) {
+ return;
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Trace_enableTracing(
+ JNIEnv* env,
+ jclass trace,
+ jclass klass,
+ jobject enter,
+ jobject exit,
+ jobject field_access,
+ jobject field_modify,
+ jobject single_step,
+ jthread thr) {
+ Java_art_Trace_enableTracing2(env,
+ trace,
+ klass,
+ enter,
+ exit,
+ field_access,
+ field_modify,
+ single_step,
+ /* thread_start */ nullptr,
+ /* thread_end */ nullptr,
+ thr);
+ return;
}
extern "C" JNIEXPORT void JNICALL Java_art_Trace_disableTracing(
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index cf31e2e..5eccba1 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -20,9 +20,9 @@
# --- ahat.jar ----------------
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(call all-java-files-under, src)
-LOCAL_JAR_MANIFEST := src/manifest.txt
-LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/src/style.css
+LOCAL_SRC_FILES := $(call all-java-files-under, src/main)
+LOCAL_JAR_MANIFEST := etc/ahat.mf
+LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/etc/style.css
LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := ahat
@@ -49,9 +49,9 @@
include $(CLEAR_VARS)
LOCAL_MODULE := ahat-test-dump
LOCAL_MODULE_TAGS := tests
-LOCAL_SRC_FILES := $(call all-java-files-under, test-dump)
+LOCAL_SRC_FILES := $(call all-java-files-under, src/test-dump)
LOCAL_PROGUARD_ENABLED := obfuscation
-LOCAL_PROGUARD_FLAG_FILES := test-dump/config.pro
+LOCAL_PROGUARD_FLAG_FILES := etc/test-dump.pro
include $(BUILD_JAVA_LIBRARY)
# Determine the location of the test-dump.jar, test-dump.hprof, and proguard
@@ -87,15 +87,15 @@
# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(call all-java-files-under, test)
-LOCAL_JAR_MANIFEST := test/manifest.txt
+LOCAL_SRC_FILES := $(call all-java-files-under, src/test)
+LOCAL_JAR_MANIFEST := etc/ahat-tests.mf
LOCAL_JAVA_RESOURCE_FILES := \
$(AHAT_TEST_DUMP_HPROF) \
$(AHAT_TEST_DUMP_BASE_HPROF) \
$(AHAT_TEST_DUMP_PROGUARD_MAP) \
- $(LOCAL_PATH)/test-dump/L.hprof \
- $(LOCAL_PATH)/test-dump/O.hprof \
- $(LOCAL_PATH)/test-dump/RI.hprof
+ $(LOCAL_PATH)/etc/L.hprof \
+ $(LOCAL_PATH)/etc/O.hprof \
+ $(LOCAL_PATH)/etc/RI.hprof
LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host
LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := tests
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index ed40cb7..a765b17 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -48,15 +48,22 @@
time.
* That we don't show the 'extra' column in the DominatedList if we are
showing all the instances.
- * That Instance.asString properly takes into account "offset" and
- "count" fields, if they are present.
* Instance.getDexCacheLocation
Reported Issues:
* Request to be able to sort tables by size.
Release History:
- 1.4 Pending
+ 1.5 Pending
+
+ 1.4 October 03, 2017
+ Give better error messages on failure to launch ahat.
+ Properly mark thread and non-default root objects as roots.
+ Improve startup performance, in some cases significantly.
+ Other miscellaneous bug fixes.
+
+ 1.3.1 August 22, 2017
+ Don't include weak references in sample paths.
1.3 July 25, 2017
Improve diffing of static and instance fields.
diff --git a/tools/ahat/test-dump/L.hprof b/tools/ahat/etc/L.hprof
similarity index 100%
rename from tools/ahat/test-dump/L.hprof
rename to tools/ahat/etc/L.hprof
Binary files differ
diff --git a/tools/ahat/test-dump/O.hprof b/tools/ahat/etc/O.hprof
similarity index 100%
rename from tools/ahat/test-dump/O.hprof
rename to tools/ahat/etc/O.hprof
Binary files differ
diff --git a/tools/ahat/etc/README.txt b/tools/ahat/etc/README.txt
new file mode 100644
index 0000000..e9b5b22
--- /dev/null
+++ b/tools/ahat/etc/README.txt
@@ -0,0 +1,9 @@
+L.hprof
+ A version of the test-dump hprof generated on Android L, with one of the
+ ROOT_DEBUGGER records manually changed to a ROOT_FINALIZING record.
+
+O.hprof
+ A version of the test-dump hprof generated on Android O.
+
+RI.hprof
+ A version of the test-dump hprof generated on the reference implementation.
diff --git a/tools/ahat/test-dump/RI.hprof b/tools/ahat/etc/RI.hprof
similarity index 100%
rename from tools/ahat/test-dump/RI.hprof
rename to tools/ahat/etc/RI.hprof
Binary files differ
diff --git a/tools/ahat/test/manifest.txt b/tools/ahat/etc/ahat-tests.mf
similarity index 100%
rename from tools/ahat/test/manifest.txt
rename to tools/ahat/etc/ahat-tests.mf
diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/etc/ahat.mf
similarity index 71%
rename from tools/ahat/src/manifest.txt
rename to tools/ahat/etc/ahat.mf
index d893c5e..1753406 100644
--- a/tools/ahat/src/manifest.txt
+++ b/tools/ahat/etc/ahat.mf
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.3
+Implementation-Version: 1.4
Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/src/style.css b/tools/ahat/etc/style.css
similarity index 100%
rename from tools/ahat/src/style.css
rename to tools/ahat/etc/style.css
diff --git a/tools/ahat/test-dump/config.pro b/tools/ahat/etc/test-dump.pro
similarity index 100%
rename from tools/ahat/test-dump/config.pro
rename to tools/ahat/etc/test-dump.pro
diff --git a/tools/ahat/src/AhatHandler.java b/tools/ahat/src/main/com/android/ahat/AhatHandler.java
similarity index 100%
rename from tools/ahat/src/AhatHandler.java
rename to tools/ahat/src/main/com/android/ahat/AhatHandler.java
diff --git a/tools/ahat/src/AhatHttpHandler.java b/tools/ahat/src/main/com/android/ahat/AhatHttpHandler.java
similarity index 100%
rename from tools/ahat/src/AhatHttpHandler.java
rename to tools/ahat/src/main/com/android/ahat/AhatHttpHandler.java
diff --git a/tools/ahat/src/BitmapHandler.java b/tools/ahat/src/main/com/android/ahat/BitmapHandler.java
similarity index 100%
rename from tools/ahat/src/BitmapHandler.java
rename to tools/ahat/src/main/com/android/ahat/BitmapHandler.java
diff --git a/tools/ahat/src/Column.java b/tools/ahat/src/main/com/android/ahat/Column.java
similarity index 100%
rename from tools/ahat/src/Column.java
rename to tools/ahat/src/main/com/android/ahat/Column.java
diff --git a/tools/ahat/src/Doc.java b/tools/ahat/src/main/com/android/ahat/Doc.java
similarity index 100%
rename from tools/ahat/src/Doc.java
rename to tools/ahat/src/main/com/android/ahat/Doc.java
diff --git a/tools/ahat/src/DocString.java b/tools/ahat/src/main/com/android/ahat/DocString.java
similarity index 100%
rename from tools/ahat/src/DocString.java
rename to tools/ahat/src/main/com/android/ahat/DocString.java
diff --git a/tools/ahat/src/DominatedList.java b/tools/ahat/src/main/com/android/ahat/DominatedList.java
similarity index 100%
rename from tools/ahat/src/DominatedList.java
rename to tools/ahat/src/main/com/android/ahat/DominatedList.java
diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/main/com/android/ahat/HeapTable.java
similarity index 100%
rename from tools/ahat/src/HeapTable.java
rename to tools/ahat/src/main/com/android/ahat/HeapTable.java
diff --git a/tools/ahat/src/HtmlDoc.java b/tools/ahat/src/main/com/android/ahat/HtmlDoc.java
similarity index 100%
rename from tools/ahat/src/HtmlDoc.java
rename to tools/ahat/src/main/com/android/ahat/HtmlDoc.java
diff --git a/tools/ahat/src/HtmlEscaper.java b/tools/ahat/src/main/com/android/ahat/HtmlEscaper.java
similarity index 100%
rename from tools/ahat/src/HtmlEscaper.java
rename to tools/ahat/src/main/com/android/ahat/HtmlEscaper.java
diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
similarity index 100%
rename from tools/ahat/src/Main.java
rename to tools/ahat/src/main/com/android/ahat/Main.java
diff --git a/tools/ahat/src/Menu.java b/tools/ahat/src/main/com/android/ahat/Menu.java
similarity index 100%
rename from tools/ahat/src/Menu.java
rename to tools/ahat/src/main/com/android/ahat/Menu.java
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java
similarity index 85%
rename from tools/ahat/src/ObjectHandler.java
rename to tools/ahat/src/main/com/android/ahat/ObjectHandler.java
index 79f8b76..bfd5d5c 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java
@@ -19,6 +19,7 @@
import com.android.ahat.heapdump.AhatArrayInstance;
import com.android.ahat.heapdump.AhatClassInstance;
import com.android.ahat.heapdump.AhatClassObj;
+import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.DiffFields;
@@ -66,7 +67,10 @@
doc.big(Summarizer.summarize(inst));
printAllocationSite(doc, query, inst);
- printGcRootPath(doc, query, inst);
+
+ if (!inst.isUnreachable()) {
+ printGcRootPath(doc, query, inst);
+ }
doc.section("Object Info");
AhatClassObj cls = inst.getClassObj();
@@ -257,23 +261,54 @@
if (bitmap != null) {
doc.section("Bitmap Image");
doc.println(DocString.image(
- DocString.formattedUri("bitmap?id=%d", bitmap.getId()), "bitmap image"));
+ DocString.formattedUri("bitmap?id=0x%x", bitmap.getId()), "bitmap image"));
}
}
private void printGcRootPath(Doc doc, Query query, AhatInstance inst) {
doc.section("Sample Path from GC Root");
List<PathElement> path = inst.getPathFromGcRoot();
- doc.table(new Column(""), new Column("Path Element"));
- doc.row(DocString.text("(rooted)"),
- DocString.link(DocString.uri("root"), DocString.text("ROOT")));
- for (PathElement element : path) {
- DocString label = DocString.text("→ ");
- label.append(Summarizer.summarize(element.instance));
- label.append(element.field);
- doc.row(DocString.text(element.isDominator ? "(dominator)" : ""), label);
+
+ // Add a dummy PathElement as a marker for the root.
+ final PathElement root = new PathElement(null, null);
+ path.add(0, root);
+
+ HeapTable.TableConfig<PathElement> table = new HeapTable.TableConfig<PathElement>() {
+ public String getHeapsDescription() {
+ return "Bytes Retained by Heap (Dominators Only)";
+ }
+
+ public long getSize(PathElement element, AhatHeap heap) {
+ if (element == root) {
+ return heap.getSize().getSize();
+ }
+ if (element.isDominator) {
+ return element.instance.getRetainedSize(heap).getSize();
+ }
+ return 0;
+ }
+
+ public List<HeapTable.ValueConfig<PathElement>> getValueConfigs() {
+ HeapTable.ValueConfig<PathElement> value = new HeapTable.ValueConfig<PathElement>() {
+ public String getDescription() {
+ return "Path Element";
+ }
+
+ public DocString render(PathElement element) {
+ if (element == root) {
+ return DocString.link(DocString.uri("rooted"), DocString.text("ROOT"));
+ } else {
+ DocString label = DocString.text("→ ");
+ label.append(Summarizer.summarize(element.instance));
+ label.append(element.field);
+ return label;
+ }
+ }
+ };
+ return Collections.singletonList(value);
+ }
};
- doc.end();
+ HeapTable.render(doc, query, DOMINATOR_PATH_ID, table, mSnapshot, path);
}
public void printDominatedObjects(Doc doc, Query query, AhatInstance inst) {
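
The rewritten printGcRootPath above prepends a dummy PathElement and later recognizes it by identity (element == root). A small self-contained illustration of that sentinel technique; this demo is hypothetical and not from the patch:

    import java.util.ArrayList;
    import java.util.List;

    // The marker is found by identity (==), so it can never be
    // confused with a genuine element, even one equal in value.
    public class SentinelDemo {
      public static void main(String[] args) {
        final String root = new String("ROOT");  // fresh identity on purpose
        List<String> path = new ArrayList<>();
        path.add("elem");
        path.add(0, root);  // prepend the marker, like the dummy PathElement
        for (String e : path) {
          System.out.println(e == root ? "(root marker)" : e);
        }
      }
    }
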
diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java
similarity index 100%
rename from tools/ahat/src/ObjectsHandler.java
rename to tools/ahat/src/main/com/android/ahat/ObjectsHandler.java
diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/main/com/android/ahat/OverviewHandler.java
similarity index 100%
rename from tools/ahat/src/OverviewHandler.java
rename to tools/ahat/src/main/com/android/ahat/OverviewHandler.java
diff --git a/tools/ahat/src/Query.java b/tools/ahat/src/main/com/android/ahat/Query.java
similarity index 95%
rename from tools/ahat/src/Query.java
rename to tools/ahat/src/main/com/android/ahat/Query.java
index f910608..9c2783c 100644
--- a/tools/ahat/src/Query.java
+++ b/tools/ahat/src/main/com/android/ahat/Query.java
@@ -65,7 +65,7 @@
*/
public long getLong(String name, long defaultValue) {
String value = get(name, null);
- return value == null ? defaultValue : Long.parseLong(value);
+ return value == null ? defaultValue : Long.decode(value);
}
/**
@@ -73,7 +73,7 @@
*/
public int getInt(String name, int defaultValue) {
String value = get(name, null);
- return value == null ? defaultValue : Integer.parseInt(value);
+ return value == null ? defaultValue : Integer.decode(value);
}
/**
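
The switch from parseLong/parseInt to decode is what lets the query layer accept the hexadecimal ids (object?id=0x...) that this patch now emits elsewhere; Long.decode and Integer.decode understand 0x-, #- and 0-prefixed literals as well as plain decimal. A standalone illustration, not from the patch:

    public class DecodeDemo {
      public static void main(String[] args) {
        System.out.println(Long.decode("0x1f"));  // prints 31
        System.out.println(Long.decode("31"));    // prints 31
        // Long.parseLong("0x1f") would throw NumberFormatException,
        // so the old code could not handle the new 0x-style ids.
      }
    }
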
diff --git a/tools/ahat/src/RootedHandler.java b/tools/ahat/src/main/com/android/ahat/RootedHandler.java
similarity index 100%
rename from tools/ahat/src/RootedHandler.java
rename to tools/ahat/src/main/com/android/ahat/RootedHandler.java
diff --git a/tools/ahat/src/SiteHandler.java b/tools/ahat/src/main/com/android/ahat/SiteHandler.java
similarity index 100%
rename from tools/ahat/src/SiteHandler.java
rename to tools/ahat/src/main/com/android/ahat/SiteHandler.java
diff --git a/tools/ahat/src/SitePrinter.java b/tools/ahat/src/main/com/android/ahat/SitePrinter.java
similarity index 100%
rename from tools/ahat/src/SitePrinter.java
rename to tools/ahat/src/main/com/android/ahat/SitePrinter.java
diff --git a/tools/ahat/src/SizeTable.java b/tools/ahat/src/main/com/android/ahat/SizeTable.java
similarity index 100%
rename from tools/ahat/src/SizeTable.java
rename to tools/ahat/src/main/com/android/ahat/SizeTable.java
diff --git a/tools/ahat/src/StaticHandler.java b/tools/ahat/src/main/com/android/ahat/StaticHandler.java
similarity index 100%
rename from tools/ahat/src/StaticHandler.java
rename to tools/ahat/src/main/com/android/ahat/StaticHandler.java
diff --git a/tools/ahat/src/SubsetSelector.java b/tools/ahat/src/main/com/android/ahat/SubsetSelector.java
similarity index 100%
rename from tools/ahat/src/SubsetSelector.java
rename to tools/ahat/src/main/com/android/ahat/SubsetSelector.java
diff --git a/tools/ahat/src/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java
similarity index 93%
rename from tools/ahat/src/Summarizer.java
rename to tools/ahat/src/main/com/android/ahat/Summarizer.java
index 50b2e4b..ae0776a 100644
--- a/tools/ahat/src/Summarizer.java
+++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java
@@ -51,7 +51,9 @@
}
// Annotate unreachable objects as such.
- if (!inst.isReachable()) {
+ if (inst.isWeaklyReachable()) {
+ formatted.append("weak ");
+ } else if (inst.isUnreachable()) {
formatted.append("unreachable ");
}
@@ -65,7 +67,7 @@
// Don't make links to placeholder objects.
formatted.append(linkText);
} else {
- URI objTarget = DocString.formattedUri("object?id=%d", inst.getId());
+ URI objTarget = DocString.formattedUri("object?id=0x%x", inst.getId());
formatted.appendLink(objTarget, linkText);
}
@@ -100,7 +102,7 @@
AhatInstance bitmap = inst.getAssociatedBitmapInstance();
String thumbnail = "";
if (bitmap != null) {
- URI uri = DocString.formattedUri("bitmap?id=%d", bitmap.getId());
+ URI uri = DocString.formattedUri("bitmap?id=0x%x", bitmap.getId());
formatted.appendThumbnail(uri, "bitmap image");
}
return formatted;
diff --git a/tools/ahat/src/dominators/DominatorsComputation.java b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
similarity index 100%
rename from tools/ahat/src/dominators/DominatorsComputation.java
rename to tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
diff --git a/tools/ahat/src/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatArrayInstance.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
diff --git a/tools/ahat/src/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatClassInstance.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
diff --git a/tools/ahat/src/heapdump/AhatClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatClassObj.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
diff --git a/tools/ahat/src/heapdump/AhatField.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatField.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatField.java
diff --git a/tools/ahat/src/heapdump/AhatHeap.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatHeap.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatHeap.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatHeap.java
diff --git a/tools/ahat/src/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
similarity index 95%
rename from tools/ahat/src/heapdump/AhatInstance.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index c044487..cb2d738 100644
--- a/tools/ahat/src/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -136,13 +136,28 @@
}
/**
- * Returns whether this object is strongly-reachable.
+ * Returns true if this object is strongly-reachable.
*/
- public boolean isReachable() {
+ public boolean isStronglyReachable() {
return mImmediateDominator != null;
}
/**
+ * Returns true if this object is reachable only through a
+ * soft/weak/phantom/finalizer reference.
+ */
+ public boolean isWeaklyReachable() {
+ return !isStronglyReachable() && mNextInstanceToGcRoot != null;
+ }
+
+ /**
+ * Returns true if this object is completely unreachable.
+ */
+ public boolean isUnreachable() {
+ return !isStronglyReachable() && !isWeaklyReachable();
+ }
+
+ /**
* Returns the heap that this instance is allocated on.
*/
public AhatHeap getHeap() {
@@ -499,6 +514,10 @@
} else {
if (ref.ref.mSoftReverseReferences == null) {
ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
+ if (ref.ref.mNextInstanceToGcRoot == null) {
+ ref.ref.mNextInstanceToGcRoot = ref.src;
+ ref.ref.mNextInstanceToGcRootField = ref.field;
+ }
}
ref.ref.mSoftReverseReferences.add(ref.src);
}
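
The three predicates added above partition every instance into exactly one of strongly reachable, weakly reachable, or unreachable, which is what lets callers such as the Summarizer change in this patch branch cleanly. An illustrative sketch of that usage, not taken from the patch:

    // Illustrative only: the predicates are mutually exclusive and exhaustive.
    static String describeReachability(AhatInstance inst) {
      if (inst.isStronglyReachable()) {
        return "strong";        // dominated by a GC root
      } else if (inst.isWeaklyReachable()) {
        return "weak";          // only via soft/weak/phantom/finalizer references
      } else {
        return "unreachable";   // inst.isUnreachable() must hold here
      }
    }
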
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatPlaceHolderClassObj.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderClassObj.java
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatPlaceHolderInstance.java
diff --git a/tools/ahat/src/heapdump/AhatSnapshot.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
similarity index 100%
rename from tools/ahat/src/heapdump/AhatSnapshot.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
diff --git a/tools/ahat/src/heapdump/Diff.java b/tools/ahat/src/main/com/android/ahat/heapdump/Diff.java
similarity index 100%
rename from tools/ahat/src/heapdump/Diff.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Diff.java
diff --git a/tools/ahat/src/heapdump/DiffFields.java b/tools/ahat/src/main/com/android/ahat/heapdump/DiffFields.java
similarity index 100%
rename from tools/ahat/src/heapdump/DiffFields.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/DiffFields.java
diff --git a/tools/ahat/src/heapdump/Diffable.java b/tools/ahat/src/main/com/android/ahat/heapdump/Diffable.java
similarity index 100%
rename from tools/ahat/src/heapdump/Diffable.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Diffable.java
diff --git a/tools/ahat/src/heapdump/DiffedFieldValue.java b/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java
similarity index 100%
rename from tools/ahat/src/heapdump/DiffedFieldValue.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java
diff --git a/tools/ahat/src/heapdump/DominatorReferenceIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
similarity index 100%
rename from tools/ahat/src/heapdump/DominatorReferenceIterator.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
diff --git a/tools/ahat/src/heapdump/Field.java b/tools/ahat/src/main/com/android/ahat/heapdump/Field.java
similarity index 100%
rename from tools/ahat/src/heapdump/Field.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Field.java
diff --git a/tools/ahat/src/heapdump/FieldValue.java b/tools/ahat/src/main/com/android/ahat/heapdump/FieldValue.java
similarity index 100%
rename from tools/ahat/src/heapdump/FieldValue.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/FieldValue.java
diff --git a/tools/ahat/src/heapdump/HprofFormatException.java b/tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java
similarity index 100%
rename from tools/ahat/src/heapdump/HprofFormatException.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/HprofFormatException.java
diff --git a/tools/ahat/src/heapdump/Instances.java b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java
similarity index 100%
rename from tools/ahat/src/heapdump/Instances.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Instances.java
diff --git a/tools/ahat/src/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
similarity index 100%
rename from tools/ahat/src/heapdump/Parser.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
diff --git a/tools/ahat/src/heapdump/PathElement.java b/tools/ahat/src/main/com/android/ahat/heapdump/PathElement.java
similarity index 100%
rename from tools/ahat/src/heapdump/PathElement.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/PathElement.java
diff --git a/tools/ahat/src/heapdump/Reference.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
similarity index 100%
rename from tools/ahat/src/heapdump/Reference.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
diff --git a/tools/ahat/src/heapdump/RootType.java b/tools/ahat/src/main/com/android/ahat/heapdump/RootType.java
similarity index 100%
rename from tools/ahat/src/heapdump/RootType.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/RootType.java
diff --git a/tools/ahat/src/heapdump/Site.java b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
similarity index 99%
rename from tools/ahat/src/heapdump/Site.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Site.java
index 821493f..523550a 100644
--- a/tools/ahat/src/heapdump/Site.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
@@ -186,7 +186,7 @@
// Add all reachable objects allocated at this site.
for (AhatInstance inst : mObjects) {
- if (inst.isReachable()) {
+ if (inst.isStronglyReachable()) {
AhatHeap heap = inst.getHeap();
Size size = inst.getSize();
ObjectsInfo info = getObjectsInfo(heap, inst.getClassObj());
diff --git a/tools/ahat/src/heapdump/Size.java b/tools/ahat/src/main/com/android/ahat/heapdump/Size.java
similarity index 100%
rename from tools/ahat/src/heapdump/Size.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Size.java
diff --git a/tools/ahat/src/heapdump/SkipNullsIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/SkipNullsIterator.java
similarity index 100%
rename from tools/ahat/src/heapdump/SkipNullsIterator.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/SkipNullsIterator.java
diff --git a/tools/ahat/src/heapdump/Sort.java b/tools/ahat/src/main/com/android/ahat/heapdump/Sort.java
similarity index 100%
rename from tools/ahat/src/heapdump/Sort.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Sort.java
diff --git a/tools/ahat/src/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
similarity index 100%
rename from tools/ahat/src/heapdump/SuperRoot.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
diff --git a/tools/ahat/src/heapdump/Type.java b/tools/ahat/src/main/com/android/ahat/heapdump/Type.java
similarity index 100%
rename from tools/ahat/src/heapdump/Type.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Type.java
diff --git a/tools/ahat/src/heapdump/Value.java b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
similarity index 100%
rename from tools/ahat/src/heapdump/Value.java
rename to tools/ahat/src/main/com/android/ahat/heapdump/Value.java
diff --git a/tools/ahat/src/proguard/ProguardMap.java b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
similarity index 100%
rename from tools/ahat/src/proguard/ProguardMap.java
rename to tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/src/test-dump/Main.java
similarity index 100%
rename from tools/ahat/test-dump/Main.java
rename to tools/ahat/src/test-dump/Main.java
diff --git a/tools/ahat/test/DiffFieldsTest.java b/tools/ahat/src/test/com/android/ahat/DiffFieldsTest.java
similarity index 100%
rename from tools/ahat/test/DiffFieldsTest.java
rename to tools/ahat/src/test/com/android/ahat/DiffFieldsTest.java
diff --git a/tools/ahat/test/DiffTest.java b/tools/ahat/src/test/com/android/ahat/DiffTest.java
similarity index 100%
rename from tools/ahat/test/DiffTest.java
rename to tools/ahat/src/test/com/android/ahat/DiffTest.java
diff --git a/tools/ahat/test/DominatorsTest.java b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
similarity index 100%
rename from tools/ahat/test/DominatorsTest.java
rename to tools/ahat/src/test/com/android/ahat/DominatorsTest.java
diff --git a/tools/ahat/test/HtmlEscaperTest.java b/tools/ahat/src/test/com/android/ahat/HtmlEscaperTest.java
similarity index 100%
rename from tools/ahat/test/HtmlEscaperTest.java
rename to tools/ahat/src/test/com/android/ahat/HtmlEscaperTest.java
diff --git a/tools/ahat/test/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
similarity index 98%
rename from tools/ahat/test/InstanceTest.java
rename to tools/ahat/src/test/com/android/ahat/InstanceTest.java
index 49a21e2..a4908fd 100644
--- a/tools/ahat/test/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -214,7 +214,9 @@
// reference as having a non-null referent.
TestDump dump = TestDump.getTestDump();
AhatInstance ref = dump.getDumpedAhatInstance("aSoftReference");
- assertNotNull(ref.getReferent());
+ AhatInstance referent = ref.getReferent();
+ assertNotNull(referent);
+ assertTrue(referent.isWeaklyReachable());
}
@Test
diff --git a/tools/ahat/test/NativeAllocationTest.java b/tools/ahat/src/test/com/android/ahat/NativeAllocationTest.java
similarity index 100%
rename from tools/ahat/test/NativeAllocationTest.java
rename to tools/ahat/src/test/com/android/ahat/NativeAllocationTest.java
diff --git a/tools/ahat/test/ObjectHandlerTest.java b/tools/ahat/src/test/com/android/ahat/ObjectHandlerTest.java
similarity index 100%
rename from tools/ahat/test/ObjectHandlerTest.java
rename to tools/ahat/src/test/com/android/ahat/ObjectHandlerTest.java
diff --git a/tools/ahat/test/OverviewHandlerTest.java b/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java
similarity index 100%
rename from tools/ahat/test/OverviewHandlerTest.java
rename to tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java
diff --git a/tools/ahat/test/PerformanceTest.java b/tools/ahat/src/test/com/android/ahat/PerformanceTest.java
similarity index 100%
rename from tools/ahat/test/PerformanceTest.java
rename to tools/ahat/src/test/com/android/ahat/PerformanceTest.java
diff --git a/tools/ahat/test/ProguardMapTest.java b/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java
similarity index 100%
rename from tools/ahat/test/ProguardMapTest.java
rename to tools/ahat/src/test/com/android/ahat/ProguardMapTest.java
diff --git a/tools/ahat/test/QueryTest.java b/tools/ahat/src/test/com/android/ahat/QueryTest.java
similarity index 100%
rename from tools/ahat/test/QueryTest.java
rename to tools/ahat/src/test/com/android/ahat/QueryTest.java
diff --git a/tools/ahat/test/RootedHandlerTest.java b/tools/ahat/src/test/com/android/ahat/RootedHandlerTest.java
similarity index 100%
rename from tools/ahat/test/RootedHandlerTest.java
rename to tools/ahat/src/test/com/android/ahat/RootedHandlerTest.java
diff --git a/tools/ahat/test/SiteHandlerTest.java b/tools/ahat/src/test/com/android/ahat/SiteHandlerTest.java
similarity index 100%
rename from tools/ahat/test/SiteHandlerTest.java
rename to tools/ahat/src/test/com/android/ahat/SiteHandlerTest.java
diff --git a/tools/ahat/test/SiteTest.java b/tools/ahat/src/test/com/android/ahat/SiteTest.java
similarity index 100%
rename from tools/ahat/test/SiteTest.java
rename to tools/ahat/src/test/com/android/ahat/SiteTest.java
diff --git a/tools/ahat/test/TestDump.java b/tools/ahat/src/test/com/android/ahat/TestDump.java
similarity index 100%
rename from tools/ahat/test/TestDump.java
rename to tools/ahat/src/test/com/android/ahat/TestDump.java
diff --git a/tools/ahat/test/TestHandler.java b/tools/ahat/src/test/com/android/ahat/TestHandler.java
similarity index 100%
rename from tools/ahat/test/TestHandler.java
rename to tools/ahat/src/test/com/android/ahat/TestHandler.java
diff --git a/tools/ahat/test/Tests.java b/tools/ahat/src/test/com/android/ahat/Tests.java
similarity index 100%
rename from tools/ahat/test/Tests.java
rename to tools/ahat/src/test/com/android/ahat/Tests.java
diff --git a/tools/ahat/test-dump/README.txt b/tools/ahat/test-dump/README.txt
deleted file mode 100644
index e7ea584..0000000
--- a/tools/ahat/test-dump/README.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-
-Main.java - A program used to generate a heap dump used for tests.
-L.hprof - A version of the test dump generated on Android L,
- with one of the ROOT_DEBUGGER records manually changed to a
- ROOT_FINALIZING record.
-O.hprof - A version of the test dump generated on Android O.
-RI.hprof - A version of the test dump generated on the reference implementation.
diff --git a/tools/art b/tools/art
index 15993dd..80a8090 100644
--- a/tools/art
+++ b/tools/art
@@ -220,6 +220,11 @@
echo "$image_location"
}
+# If Android logging is not explicitly set, only print warnings and errors.
+if [ -z "$ANDROID_LOG_TAGS" ]; then
+ ANDROID_LOG_TAGS='*:w'
+fi
+
# Runs dalvikvm, returns its exit code.
# (Oat directories are cleaned up in between runs)
function run_art() {
@@ -229,15 +234,16 @@
# First cleanup any left-over 'oat' files from the last time dalvikvm was run.
cleanup_oat_directory_for_classpath "$@"
# Run dalvikvm.
- verbose_run ANDROID_DATA=$ANDROID_DATA \
- ANDROID_ROOT=$ANDROID_ROOT \
- LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
- PATH=$ANDROID_ROOT/bin:$PATH \
- LD_USE_LOAD_BIAS=1 \
- $LAUNCH_WRAPPER $ART_BINARY_PATH $lib \
- -XXlib:$LIBART \
- -Xnorelocate \
- -Ximage:"$image_location" \
+ verbose_run ANDROID_DATA="$ANDROID_DATA" \
+ ANDROID_ROOT="$ANDROID_ROOT" \
+ LD_LIBRARY_PATH="$LD_LIBRARY_PATH" \
+ PATH="$ANDROID_ROOT/bin:$PATH" \
+ LD_USE_LOAD_BIAS=1 \
+ ANDROID_LOG_TAGS="$ANDROID_LOG_TAGS" \
+ $LAUNCH_WRAPPER $ART_BINARY_PATH $lib \
+ -XXlib:"$LIBART" \
+ -Xnorelocate \
+ -Ximage:"$image_location" \
"$@"
ret=$?
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 4f99ac3..ab604b2 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -70,6 +70,7 @@
make_command="make $j_arg $showcommands build-art-host-tests $common_targets dx-tests"
make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so "
make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
+ make_command+=" libwrapagentpropertiesd libwrapagentproperties"
elif [[ $mode == "target" ]]; then
make_command="make $j_arg $showcommands build-art-target-tests $common_targets"
make_command+=" libjavacrypto libjavacoretests libnetd_client linker toybox toolbox sh"
diff --git a/tools/dmtracedump/Android.bp b/tools/dmtracedump/Android.bp
index 4f942bd..9f207e9 100644
--- a/tools/dmtracedump/Android.bp
+++ b/tools/dmtracedump/Android.bp
@@ -23,6 +23,7 @@
"-O0",
"-g",
"-Wall",
+ "-Werror",
],
target: {
windows: {
@@ -40,5 +41,6 @@
"-O0",
"-g",
"-Wall",
+ "-Werror",
],
}
diff --git a/tools/libjdwp_art_failures.txt b/tools/libjdwp_art_failures.txt
index 6b5daec..8d67c45 100644
--- a/tools/libjdwp_art_failures.txt
+++ b/tools/libjdwp_art_failures.txt
@@ -48,7 +48,7 @@
name: "org.apache.harmony.jpda.tests.jdwp.Events.VMDeath002Test#testVMDeathRequest"
},
{
- description: "Test fails with INTERNAL error due to proxy frame!",
+ description: "Test fails with OPAQUE_FRAME error due to attempting a GetLocalReference on a proxy frame instead of GetLocalInstance!",
result: EXEC_FAILED,
bug: 66903662,
name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ProxyThisObjectTest#testThisObject"
@@ -71,6 +71,33 @@
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
"org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
},
+/* TODO Investigate these failures more closely */
+{
+ description: "Tests that fail when run on the chromium buildbots against the prebuilt libjdwp.so in certain configurations",
+ result: EXEC_FAILED,
+ bug: 67497270,
+ names: [
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEvents003Test#testCombinedEvents003_01",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_01",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_02",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_03",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_04",
+ "org.apache.harmony.jpda.tests.jdwp.Events.CombinedEventsTest#testCombinedEvents_06",
+ "org.apache.harmony.jpda.tests.jdwp.Events.VMDeathTest#testVMDeathEvent",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.ClassPrepareTest#testClassPrepare001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.ExceptionTest#testException001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldAccessTest#testFieldAccess001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.FieldModificationTest#testFieldModification001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.SingleStepTest#testSingleStep001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.VMDeathTest#testVMDeathRequest",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType.SignatureWithGenericTest#testSignatureWithGeneric001",
+ "org.apache.harmony.jpda.tests.jdwp.StackFrame.GetValues002Test#testGetValues005_Int2",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.SetDefaultStratumTest#testSetDefaultStratum001",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadReference.StatusTest#testStatus001",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesTest#testAllClasses002",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.AllClassesWithGenericTest#testAllClassesWithGeneric001"
+ ]
+},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",
diff --git a/tools/run-prebuilt-libjdwp-tests.sh b/tools/run-prebuilt-libjdwp-tests.sh
new file mode 100755
index 0000000..46c2a15
--- /dev/null
+++ b/tools/run-prebuilt-libjdwp-tests.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ ! -d libcore ]]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
+setpaths # include platform prebuilt java, javac, etc in $PATH.
+
+if [[ `uname` != 'Linux' ]]; then
+ echo "Script cannot be run on $(uname). It is Linux only."
+ exit 2
+fi
+
+jdwp_path=${ANDROID_JAVA_HOME}/jre/lib/amd64/libjdwp.so
+if [[ ! -f $jdwp_path ]]; then
+ echo "Unable to find prebuilts libjdwp.so! Did the version change from jdk8?"
+ exit 3
+fi
+
+args=("$@")
+debug="no"
+has_variant="no"
+has_mode="no"
+
+while true; do
+ if [[ $1 == "--debug" ]]; then
+ debug="yes"
+ shift
+ elif [[ "$1" == --mode=* ]]; then
+ has_mode="yes"
+ if [[ $1 != "--mode=host" ]]; then
+ # Just print out an actually helpful error message.
+ echo "Only host tests can be run against prebuilt libjdwp"
+ exit 4
+ fi
+ shift
+ elif [[ $1 == --variant=* ]]; then
+ has_variant="yes"
+ if [[ $1 != "--variant=x64" ]] && [[ $1 != "--variant=X64" ]]; then
+ # Just print out an actually helpful error message.
+ echo "Only 64bit runs can be tested against the prebuilt libjdwp!"
+ exit 5
+ fi
+ shift
+ elif [[ "$1" == "" ]]; then
+ break
+ else
+ shift
+ fi
+done
+
+if [[ "$has_mode" = "no" ]]; then
+ args+=(--mode=host)
+fi
+
+if [[ "$has_variant" = "no" ]]; then
+ args+=(--variant=X64)
+fi
+
+wrapper_name=""
+plugin=""
+if [[ "$debug" = "yes" ]]; then
+ wrapper_name=libwrapagentpropertiesd
+ plugin="$ANDROID_HOST_OUT/lib64/libopenjdkjvmtid.so"
+else
+ wrapper_name=libwrapagentproperties
+ plugin="$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so"
+fi
+wrapper=$ANDROID_HOST_OUT/lib64/${wrapper_name}.so
+
+if [[ ! -f $wrapper ]]; then
+ echo "need to build $wrapper to run prebuild-libjdwp-tests!"
+ echo "m -j40 ${wrapper/.so/}"
+ exit 6
+fi
+
+if [[ ! -f $plugin ]]; then
+ echo "jvmti plugin not built!"
+ exit 7
+fi
+
+props_path=$PWD/art/tools/libjdwp-compat.props
+expect_path=$PWD/art/tools/libjdwp_art_failures.txt
+
+function verbose_run() {
+ echo "$@"
+ env "$@"
+}
+
+verbose_run LD_LIBRARY_PATH="$(dirname $jdwp_path):$LD_LIBRARY_PATH" \
+ ./art/tools/run-jdwp-tests.sh \
+ "${args[@]}" \
+ "-Xplugin:$plugin" \
+ --agent-wrapper "${wrapper}"="${props_path}" \
+ --jdwp-path "$jdwp_path" \
+ --expectations "$expect_path"