Merge "ART: JIT code cache allocation cleanup"
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e4efbef..0b3ac20 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2521,12 +2521,6 @@
__ cfi().RelOffsetForMany(DWARFReg(S0), 0, fpu_spill_mask_, kArmWordSize);
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ mov(IP, ShifterOperand(0));
- __ StoreToOffset(kStoreWord, IP, SP, -kShouldDeoptimizeFlagSize);
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
@@ -2537,6 +2531,12 @@
if (RequiresCurrentMethod()) {
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ mov(IP, ShifterOperand(0));
+ __ StoreToOffset(kStoreWord, IP, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c6bd871..a8b00c3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2659,14 +2659,6 @@
GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0), 0, fpu_spill_mask_, kArmWordSize);
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- // Initialize should_deoptimize flag to 0.
- __ Mov(temp, 0);
- GetAssembler()->StoreToOffset(kStoreWord, temp, sp, -kShouldDeoptimizeFlagSize);
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ Sub(sp, sp, adjust);
GetAssembler()->cfi().AdjustCFAOffset(adjust);
@@ -2677,6 +2669,14 @@
if (RequiresCurrentMethod()) {
GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // Initialize should_deoptimize flag to 0.
+ __ Mov(temp, 0);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorARMVIXL::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 83a261d..79fccfe 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1086,11 +1086,6 @@
}
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ movl(Address(ESP, -kShouldDeoptimizeFlagSize), Immediate(0));
- }
-
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
@@ -1100,6 +1095,11 @@
if (RequiresCurrentMethod()) {
__ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7331a9e..57319ce 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1298,12 +1298,6 @@
}
}
- if (GetGraph()->HasShouldDeoptimizeFlag()) {
- // Initialize should_deoptimize flag to 0.
- __ movl(Address(CpuRegister(RSP), xmm_spill_location - kShouldDeoptimizeFlagSize),
- Immediate(0));
- }
-
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
@@ -1311,6 +1305,11 @@
__ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
CpuRegister(kMethodRegisterArgument));
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ movl(Address(CpuRegister(RSP), GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
+ }
}
void CodeGeneratorX86_64::GenerateFrameExit() {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d5d927c..f9267e2 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2169,30 +2169,27 @@
// cleaning up before that (e.g. the oat writers are created before the
// runtime).
profile_compilation_info_.reset(new ProfileCompilationInfo());
- ScopedFlock flock;
- bool success = true;
+ ScopedFlock profile_file;
std::string error;
if (profile_file_fd_ != -1) {
- // The file doesn't need to be flushed so don't check the usage.
- // Pass a bogus path so that we can easily attribute any reported error.
- File file(profile_file_fd_, "profile", /*check_usage*/ false, /*read_only_mode*/ true);
- if (flock.Init(&file, &error)) {
- success = profile_compilation_info_->Load(profile_file_fd_);
- }
+ profile_file = LockedFile::DupOf(profile_file_fd_, "profile",
+ true /* read_only_mode */, &error);
} else if (profile_file_ != "") {
- if (flock.Init(profile_file_.c_str(), O_RDONLY, /* block */ true, &error)) {
- success = profile_compilation_info_->Load(flock.GetFile()->Fd());
- }
- }
- if (!error.empty()) {
- LOG(WARNING) << "Cannot lock profiles: " << error;
+ profile_file = LockedFile::Open(profile_file_.c_str(), O_RDONLY, true, &error);
}
- if (!success) {
+ // Return early if we're unable to obtain a lock on the profile.
+ if (profile_file.get() == nullptr) {
+ LOG(ERROR) << "Cannot lock profiles: " << error;
+ return false;
+ }
+
+ if (!profile_compilation_info_->Load(profile_file->Fd())) {
profile_compilation_info_.reset(nullptr);
+ return false;
}
- return success;
+ return true;
}
private:
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 06a0f23..7773b9f 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -61,7 +61,8 @@
image_header_(image_header),
image_location_(image_location),
image_diff_pid_(image_diff_pid),
- zygote_diff_pid_(zygote_diff_pid) {}
+ zygote_diff_pid_(zygote_diff_pid),
+ zygote_pid_only_(false) {}
bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
@@ -71,13 +72,18 @@
os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
+ PrintPidLine("IMAGE", image_diff_pid_);
+ os << "\n\n";
+ PrintPidLine("ZYGOTE", zygote_diff_pid_);
bool ret = true;
- if (image_diff_pid_ >= 0) {
- os << "IMAGE DIFF PID (" << image_diff_pid_ << "): ";
- ret = DumpImageDiff(image_diff_pid_, zygote_diff_pid_);
+ if (image_diff_pid_ >= 0 || zygote_diff_pid_ >= 0) {
+ if (image_diff_pid_ < 0) {
+ image_diff_pid_ = zygote_diff_pid_;
+ zygote_diff_pid_ = -1;
+ zygote_pid_only_ = true;
+ }
+ ret = DumpImageDiff();
os << "\n\n";
- } else {
- os << "IMAGE DIFF PID: disabled\n\n";
}
os << std::flush;
@@ -86,6 +92,14 @@
}
private:
+ void PrintPidLine(const std::string& kind, pid_t pid) {
+ if (pid < 0) {
+ *os_ << kind << " DIFF PID: disabled\n\n";
+ } else {
+ *os_ << kind << " DIFF PID (" << pid << "): ";
+ }
+ }
+
static bool EndsWith(const std::string& str, const std::string& suffix) {
return str.size() >= suffix.size() &&
str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
@@ -101,14 +115,14 @@
return str.substr(idx + 1);
}
- bool DumpImageDiff(pid_t image_diff_pid, pid_t zygote_diff_pid)
+ bool DumpImageDiff()
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
{
struct stat sts;
std::string proc_pid_str =
- StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
if (stat(proc_pid_str.c_str(), &sts) == -1) {
os << "Process does not exist";
return false;
@@ -116,7 +130,7 @@
}
// Open /proc/$pid/maps to view memory maps
- auto proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid));
+ auto proc_maps = std::unique_ptr<BacktraceMap>(BacktraceMap::Create(image_diff_pid_));
if (proc_maps == nullptr) {
os << "Could not read backtrace maps";
return false;
@@ -145,7 +159,7 @@
}
// Future idea: diff against zygote so we can ignore the shared dirty pages.
- return DumpImageDiffMap(image_diff_pid, zygote_diff_pid, boot_map);
+ return DumpImageDiffMap(boot_map);
}
static std::string PrettyFieldValue(ArtField* field, mirror::Object* obj)
@@ -284,16 +298,14 @@
}
// Look at /proc/$pid/mem and only diff the things from there
- bool DumpImageDiffMap(pid_t image_diff_pid,
- pid_t zygote_diff_pid,
- const backtrace_map_t& boot_map)
+ bool DumpImageDiffMap(const backtrace_map_t& boot_map)
REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
const PointerSize pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
std::string file_name =
- StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
size_t boot_map_size = boot_map.end - boot_map.start;
@@ -347,9 +359,9 @@
std::vector<uint8_t> zygote_contents;
std::unique_ptr<File> zygote_map_file;
- if (zygote_diff_pid != -1) {
+ if (zygote_diff_pid_ != -1) {
std::string zygote_file_name =
- StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid)); // NOLINT [runtime/int]
+ StringPrintf("/proc/%ld/mem", static_cast<long>(zygote_diff_pid_)); // NOLINT [runtime/int]
zygote_map_file.reset(OS::OpenFileForReading(zygote_file_name.c_str()));
// The boot map should be at the same address.
zygote_contents.resize(boot_map_size);
@@ -360,7 +372,7 @@
}
std::string page_map_file_name = StringPrintf(
- "/proc/%ld/pagemap", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ "/proc/%ld/pagemap", static_cast<long>(image_diff_pid_)); // NOLINT [runtime/int]
auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
if (page_map_file == nullptr) {
os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
@@ -503,10 +515,14 @@
// Look up local classes by their descriptor
std::map<std::string, mirror::Class*> local_class_map;
- // Objects that are dirty against the image (possibly shared or private dirty).
+ // Image dirty objects
+ // If zygote_pid_only_ == true, these are dirty objects in the zygote.
+ // If zygote_pid_only_ == false, these are private dirty objects in the application.
std::set<mirror::Object*> image_dirty_objects;
- // Objects that are dirty against the zygote (probably private dirty).
+ // Zygote dirty objects (probably private dirty).
+ // We only add objects here if they differed in both the image and the zygote, so
+ // they are probably private dirty.
std::set<mirror::Object*> zygote_dirty_objects;
size_t dirty_object_bytes = 0;
@@ -561,7 +577,7 @@
// Different from zygote.
zygote_dirty_objects.insert(obj);
} else {
- // Just different from iamge.
+ // Just different from image.
image_dirty_objects.insert(obj);
}
@@ -658,7 +674,11 @@
class_data, [](const ClassData& d) { return d.clean_object_count; });
if (!zygote_dirty_objects.empty()) {
- os << "\n" << " Dirty objects compared to zygote (probably private dirty): "
+ // We only reach this point if both pids were specified. Furthermore,
+ // objects are only displayed here if they differed in both the image
+ // and the zygote, so they are probably private dirty.
+ CHECK(image_diff_pid_ > 0 && zygote_diff_pid_ > 0);
+ os << "\n" << " Zygote dirty objects (probably shared dirty): "
<< zygote_dirty_objects.size() << "\n";
for (mirror::Object* obj : zygote_dirty_objects) {
const uint8_t* obj_bytes = reinterpret_cast<const uint8_t*>(obj);
@@ -667,8 +687,13 @@
DiffObjectContents(obj, remote_bytes, os);
}
}
- os << "\n" << " Dirty objects compared to image (private or shared dirty): "
- << image_dirty_objects.size() << "\n";
+ os << "\n";
+ if (zygote_pid_only_) {
+ os << " Zygote dirty objects: ";
+ } else {
+ os << " Application dirty objects (private or shared dirty): ";
+ }
+ os << image_dirty_objects.size() << "\n";
for (mirror::Object* obj : image_dirty_objects) {
const uint8_t* obj_bytes = reinterpret_cast<const uint8_t*>(obj);
ptrdiff_t offset = obj_bytes - begin_image_ptr;
@@ -1013,6 +1038,7 @@
const std::string image_location_;
pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
pid_t zygote_diff_pid_; // Dump image diff against zygote boot.art if pid is non-negative
+ bool zygote_pid_only_; // The user only specified a pid for the zygote.
DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
};
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index ec3481b..848eb8d 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -304,8 +304,10 @@
TimingLogger::ScopedTiming t("Writing image File", timings_);
std::string error_msg;
- ScopedFlock img_flock;
- img_flock.Init(out, &error_msg);
+ // No error checking here; this is best-effort. The locking may or may not
+ // succeed and we don't really care either way.
+ ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
+ true /* read_only_mode */, &error_msg);
CHECK(image_ != nullptr);
CHECK(out != nullptr);
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b9a85bc..c238f0d 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -33,7 +33,7 @@
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Load(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -45,7 +45,7 @@
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i].GetFile()->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd())) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -62,11 +62,11 @@
}
// We were successful in merging all profile information. Update the reference profile.
- if (!reference_profile_file.GetFile()->ClearContent()) {
+ if (!reference_profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear reference profile file";
return kErrorIO;
}
- if (!info.Save(reference_profile_file.GetFile()->Fd())) {
+ if (!info.Save(reference_profile_file->Fd())) {
LOG(WARNING) << "Could not save reference profile file";
return kErrorIO;
}
@@ -74,26 +74,15 @@
return kCompile;
}
-static bool InitFlock(const std::string& filename, ScopedFlock& flock, std::string* error) {
- return flock.Init(filename.c_str(), O_RDWR, /* block */ true, error);
-}
-
-static bool InitFlock(int fd, ScopedFlock& flock, std::string* error) {
- DCHECK_GE(fd, 0);
- // We do not own the descriptor, so disable auto-close and don't check usage.
- File file(fd, false);
- file.DisableAutoClose();
- return flock.Init(&file, error);
-}
-
-class ScopedCollectionFlock {
+class ScopedFlockList {
public:
- explicit ScopedCollectionFlock(size_t size) : flocks_(size) {}
+ explicit ScopedFlockList(size_t size) : flocks_(size) {}
// Will block until all the locks are acquired.
bool Init(const std::vector<std::string>& filenames, /* out */ std::string* error) {
for (size_t i = 0; i < filenames.size(); i++) {
- if (!InitFlock(filenames[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block */ true, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -105,7 +94,9 @@
bool Init(const std::vector<int>& fds, /* out */ std::string* error) {
for (size_t i = 0; i < fds.size(); i++) {
DCHECK_GE(fds[i], 0);
- if (!InitFlock(fds[i], flocks_[i], error)) {
+ flocks_[i] = LockedFile::DupOf(fds[i], "profile-file",
+ true /* read_only_mode */, error);
+ if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
}
@@ -123,39 +114,47 @@
const std::vector<int>& profile_files_fd,
int reference_profile_file_fd) {
DCHECK_GE(reference_profile_file_fd, 0);
+
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files_fd.size());
- if (!profile_files_flocks.Init(profile_files_fd, &error)) {
+ ScopedFlockList profile_files(profile_files_fd.size());
+ if (!profile_files.Init(profile_files_fd, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file_fd, reference_profile_file_flock, &error)) {
+
+ // The reference_profile_file is opened in read/write mode because it's
+ // cleared after processing.
+ ScopedFlock reference_profile_file = LockedFile::DupOf(reference_profile_file_fd,
+ "reference-profile",
+ false /* read_only_mode */,
+ &error);
+ if (reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profiled files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
const std::string& reference_profile_file) {
std::string error;
- ScopedCollectionFlock profile_files_flocks(profile_files.size());
- if (!profile_files_flocks.Init(profile_files, &error)) {
+
+ ScopedFlockList profile_files_list(profile_files.size());
+ if (!profile_files_list.Init(profile_files, &error)) {
LOG(WARNING) << "Could not lock profile files: " << error;
return kErrorCannotLock;
}
- ScopedFlock reference_profile_file_flock;
- if (!InitFlock(reference_profile_file, reference_profile_file_flock, &error)) {
+
+ ScopedFlock locked_reference_profile_file = LockedFile::Open(
+ reference_profile_file.c_str(), O_RDWR, /* block */ true, &error);
+ if (locked_reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profile files: " << error;
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_flocks.Get(),
- reference_profile_file_flock);
+ return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
}
} // namespace art
diff --git a/runtime/base/array_ref.h b/runtime/base/array_ref.h
index 00b9bad..630a036 100644
--- a/runtime/base/array_ref.h
+++ b/runtime/base/array_ref.h
@@ -40,17 +40,17 @@
template <typename T>
class ArrayRef {
public:
- typedef T value_type;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T* iterator;
- typedef const T* const_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef ptrdiff_t difference_type;
- typedef size_t size_type;
+ using value_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using iterator = T*;
+ using const_iterator = const T*;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using difference_type = ptrdiff_t;
+ using size_type = size_t;
// Constructors.
@@ -140,22 +140,22 @@
}
reference front() {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[0];
}
const_reference front() const {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[0];
}
reference back() {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[size_ - 1u];
}
const_reference back() const {
- DCHECK_NE(size_, 0u);
+ DCHECK(!empty());
return array_[size_ - 1u];
}
@@ -165,14 +165,17 @@
ArrayRef SubArray(size_type pos) {
return SubArray(pos, size() - pos);
}
+
ArrayRef<const T> SubArray(size_type pos) const {
return SubArray(pos, size() - pos);
}
+
ArrayRef SubArray(size_type pos, size_type length) {
DCHECK_LE(pos, size());
DCHECK_LE(length, size() - pos);
return ArrayRef(data() + pos, length);
}
+
ArrayRef<const T> SubArray(size_type pos, size_type length) const {
DCHECK_LE(pos, size());
DCHECK_LE(length, size() - pos);
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
index 32283d0..0da977d 100644
--- a/runtime/base/array_slice.h
+++ b/runtime/base/array_slice.h
@@ -17,7 +17,6 @@
#ifndef ART_RUNTIME_BASE_ARRAY_SLICE_H_
#define ART_RUNTIME_BASE_ARRAY_SLICE_H_
-#include "length_prefixed_array.h"
#include "stride_iterator.h"
#include "base/bit_utils.h"
#include "base/casts.h"
@@ -27,9 +26,21 @@
// An ArraySlice is an abstraction over an array or a part of an array of a particular type. It does
// bounds checking and can be made from several common array-like structures in Art.
-template<typename T>
+template <typename T>
class ArraySlice {
public:
+ using value_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using iterator = StrideIterator<T>;
+ using const_iterator = StrideIterator<const T>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using difference_type = ptrdiff_t;
+ using size_type = size_t;
+
// Create an empty array slice.
ArraySlice() : array_(nullptr), size_(0), element_size_(0) {}
@@ -44,85 +55,74 @@
DCHECK(array_ != nullptr || length == 0);
}
- // Create an array slice of the elements between start_offset and end_offset of the array with
- // each element being element_size bytes long. Both start_offset and end_offset are in
- // element_size units.
- ArraySlice(T* array,
- uint32_t start_offset,
- uint32_t end_offset,
- size_t element_size = sizeof(T))
- : array_(nullptr),
- size_(end_offset - start_offset),
- element_size_(element_size) {
- DCHECK(array_ != nullptr || size_ == 0);
- DCHECK_LE(start_offset, end_offset);
- if (size_ != 0) {
- uintptr_t offset = start_offset * element_size_;
- array_ = *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array) + offset);
- }
- }
+ // Iterators.
+ iterator begin() { return iterator(&AtUnchecked(0), element_size_); }
+ const_iterator begin() const { return const_iterator(&AtUnchecked(0), element_size_); }
+ const_iterator cbegin() const { return const_iterator(&AtUnchecked(0), element_size_); }
+ StrideIterator<T> end() { return StrideIterator<T>(&AtUnchecked(size_), element_size_); }
+ const_iterator end() const { return const_iterator(&AtUnchecked(size_), element_size_); }
+ const_iterator cend() const { return const_iterator(&AtUnchecked(size_), element_size_); }
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
+ const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
+ const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
- // Create an array slice of the elements between start_offset and end_offset of the array with
- // each element being element_size bytes long and having the given alignment. Both start_offset
- // and end_offset are in element_size units.
- ArraySlice(LengthPrefixedArray<T>* array,
- uint32_t start_offset,
- uint32_t end_offset,
- size_t element_size = sizeof(T),
- size_t alignment = alignof(T))
- : array_(nullptr),
- size_(end_offset - start_offset),
- element_size_(element_size) {
- DCHECK(array != nullptr || size_ == 0);
- if (size_ != 0) {
- DCHECK_LE(start_offset, end_offset);
- DCHECK_LE(start_offset, array->size());
- DCHECK_LE(end_offset, array->size());
- array_ = &array->At(start_offset, element_size_, alignment);
- }
- }
+ // Size.
+ size_type size() const { return size_; }
+ bool empty() const { return size() == 0u; }
- T& At(size_t index) {
+ // Element access. NOTE: Not providing at() and data().
+
+ reference operator[](size_t index) {
DCHECK_LT(index, size_);
return AtUnchecked(index);
}
- const T& At(size_t index) const {
+ const_reference operator[](size_t index) const {
DCHECK_LT(index, size_);
return AtUnchecked(index);
}
- T& operator[](size_t index) {
- return At(index);
+ reference front() {
+ DCHECK(!empty());
+ return (*this)[0];
}
- const T& operator[](size_t index) const {
- return At(index);
+ const_reference front() const {
+ DCHECK(!empty());
+ return (*this)[0];
}
- StrideIterator<T> begin() {
- return StrideIterator<T>(&AtUnchecked(0), element_size_);
+ reference back() {
+ DCHECK(!empty());
+ return (*this)[size_ - 1u];
}
- StrideIterator<const T> begin() const {
- return StrideIterator<const T>(&AtUnchecked(0), element_size_);
+ const_reference back() const {
+ DCHECK(!empty());
+ return (*this)[size_ - 1u];
}
- StrideIterator<T> end() {
- return StrideIterator<T>(&AtUnchecked(size_), element_size_);
+ ArraySlice<T> SubArray(size_type pos) {
+ return SubArray(pos, size() - pos);
}
- StrideIterator<const T> end() const {
- return StrideIterator<const T>(&AtUnchecked(size_), element_size_);
+ ArraySlice<const T> SubArray(size_type pos) const {
+ return SubArray(pos, size() - pos);
}
- IterationRange<StrideIterator<T>> AsRange() {
- return size() != 0 ? MakeIterationRange(begin(), end())
- : MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
+ ArraySlice<T> SubArray(size_type pos, size_type length) {
+ DCHECK_LE(pos, size());
+ DCHECK_LE(length, size() - pos);
+ return ArraySlice<T>(&AtUnchecked(pos), length, element_size_);
}
- size_t size() const {
- return size_;
+ ArraySlice<const T> SubArray(size_type pos, size_type length) const {
+ DCHECK_LE(pos, size());
+ DCHECK_LE(length, size() - pos);
+ return ArraySlice<const T>(&AtUnchecked(pos), length, element_size_);
}
size_t ElementSize() const {
diff --git a/runtime/base/length_prefixed_array.h b/runtime/base/length_prefixed_array.h
index 8060263..a570b81 100644
--- a/runtime/base/length_prefixed_array.h
+++ b/runtime/base/length_prefixed_array.h
@@ -65,7 +65,7 @@
size_t element_size = sizeof(T),
size_t alignment = alignof(T)) {
DCHECK_ALIGNED_PARAM(element_size, alignment);
- return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size;
+ return RoundUp(offsetof(LengthPrefixedArray<T>, data_), alignment) + index * element_size;
}
static size_t ComputeSize(size_t num_elements,
@@ -87,7 +87,7 @@
// Clear the potentially uninitialized padding between the size_ and actual data.
void ClearPadding(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
- size_t gap_offset = offsetof(LengthPrefixedArray<T>, data);
+ size_t gap_offset = offsetof(LengthPrefixedArray<T>, data_);
size_t gap_size = OffsetOfElement(0, element_size, alignment) - gap_offset;
memset(reinterpret_cast<uint8_t*>(this) + gap_offset, 0, gap_size);
}
@@ -104,7 +104,7 @@
}
uint32_t size_;
- uint8_t data[0];
+ uint8_t data_[0];
};
// Returns empty iteration range if the array is null.
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 862f0d0..b8df689 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -28,46 +28,39 @@
using android::base::StringPrintf;
-bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
- return Init(filename, O_CREAT | O_RDWR, true, error_msg);
+/* static */ ScopedFlock LockedFile::Open(const char* filename, std::string* error_msg) {
+ return Open(filename, O_CREAT | O_RDWR, true, error_msg);
}
-bool ScopedFlock::Init(const char* filename, int flags, bool block, std::string* error_msg) {
- return Init(filename, flags, block, true, error_msg);
-}
-
-bool ScopedFlock::Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg) {
- flush_on_close_ = flush_on_close;
+/* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
+ std::string* error_msg) {
while (true) {
- if (file_.get() != nullptr) {
- UNUSED(file_->FlushCloseOrErase()); // Ignore result.
+ // NOTE: We don't check usage here because the ScopedFlock should *never* be
+ // responsible for flushing its underlying FD. Its only purpose should be
+ // to acquire a lock, and to unlock / close it in the corresponding
+ // destructor. Callers should explicitly flush files they're writing to if
+ // that is the desired behaviour.
+ std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+ if (file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
+ return nullptr;
}
- bool check_usage = flush_on_close; // Check usage only if we need to flush on close.
- file_.reset(OS::OpenFileWithFlags(filename, flags, check_usage));
- if (file_.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
- return false;
- }
int operation = block ? LOCK_EX : (LOCK_EX | LOCK_NB);
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), operation));
+ int flock_result = TEMP_FAILURE_RETRY(flock(file->Fd(), operation));
if (flock_result == EWOULDBLOCK) {
// File is locked by someone else and we are required not to block;
- return false;
+ return nullptr;
}
if (flock_result != 0) {
*error_msg = StringPrintf("Failed to lock file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat fstat_stat;
- int fstat_result = TEMP_FAILURE_RETRY(fstat(file_->Fd(), &fstat_stat));
+ int fstat_result = TEMP_FAILURE_RETRY(fstat(file->Fd(), &fstat_stat));
if (fstat_result != 0) {
*error_msg = StringPrintf("Failed to fstat file '%s': %s", filename, strerror(errno));
- return false;
+ return nullptr;
}
struct stat stat_stat;
int stat_result = TEMP_FAILURE_RETRY(stat(filename, &stat_stat));
@@ -80,7 +73,7 @@
// Note that in theory we could race with someone here for a long time and end up retrying
// over and over again. This potential behavior does not fit well in the non-blocking
// semantics. Thus, if we are not require to block return failure when racing.
- return false;
+ return nullptr;
}
}
if (fstat_stat.st_dev != stat_stat.st_dev || fstat_stat.st_ino != stat_stat.st_ino) {
@@ -89,61 +82,47 @@
continue;
} else {
// See comment above.
- return false;
+ return nullptr;
}
}
- return true;
+
+ return ScopedFlock(new LockedFile(std::move((*file.get()))));
}
}
-bool ScopedFlock::Init(File* file, std::string* error_msg) {
- flush_on_close_ = true;
- file_.reset(new File(dup(file->Fd()), file->GetPath(), file->CheckUsage(), file->ReadOnlyMode()));
- if (file_->Fd() == -1) {
- file_.reset();
+ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_msg) {
+ // NOTE: We don't check usage here because the ScopedFlock should *never* be
+ // responsible for flushing its underlying FD. Its only purpose should be
+ // to acquire a lock, and to unlock / close it in the corresponding
+ // destructor. Callers should explicitly flush files they're writing to if
+ // that is the desired behaviour.
+ ScopedFlock locked_file(
+ new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+ if (locked_file->Fd() == -1) {
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
- file->GetPath().c_str(), strerror(errno));
- return false;
+ locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
- file_.reset();
+ if (0 != TEMP_FAILURE_RETRY(flock(locked_file->Fd(), LOCK_EX))) {
*error_msg = StringPrintf(
- "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
- return false;
+ "Failed to lock file '%s': %s", locked_file->GetPath().c_str(), strerror(errno));
+ return nullptr;
}
- return true;
+
+ return locked_file;
}
-File* ScopedFlock::GetFile() const {
- CHECK(file_.get() != nullptr);
- return file_.get();
-}
-
-bool ScopedFlock::HasFile() {
- return file_.get() != nullptr;
-}
-
-ScopedFlock::ScopedFlock() : flush_on_close_(true) { }
-
-ScopedFlock::~ScopedFlock() {
- if (file_.get() != nullptr) {
- int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
+void LockedFile::ReleaseLock() {
+ if (this->Fd() != -1) {
+ int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
if (flock_result != 0) {
// Only printing a warning is okay since this is only used with either:
// 1) a non-blocking Init call, or
// 2) as a part of a seperate binary (eg dex2oat) which has it's own timeout logic to prevent
// deadlocks.
// This means we can be sure that the warning won't cause a deadlock.
- PLOG(WARNING) << "Unable to unlock file " << file_->GetPath();
- }
- int close_result = -1;
- if (file_->ReadOnlyMode() || !flush_on_close_) {
- close_result = file_->Close();
- } else {
- close_result = file_->FlushCloseOrErase();
- }
- if (close_result != 0) {
- PLOG(WARNING) << "Could not close scoped file lock file.";
+ PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
}
}
}
diff --git a/runtime/base/scoped_flock.h b/runtime/base/scoped_flock.h
index a3a320f..1b933c0 100644
--- a/runtime/base/scoped_flock.h
+++ b/runtime/base/scoped_flock.h
@@ -20,63 +20,68 @@
#include <memory>
#include <string>
+#include "android-base/unique_fd.h"
+
+#include "base/logging.h"
#include "base/macros.h"
+#include "base/unix_file/fd_file.h"
#include "os.h"
namespace art {
-// A scoped file-lock implemented using flock. The file is locked by calling the Init function and
-// is released during destruction. Note that failing to unlock the file only causes a warning to be
-// printed. Users should take care that this does not cause potential deadlocks.
-//
-// Only printing a warning on unlock failure is okay since this is only used with either:
-// 1) a non-blocking Init call, or
-// 2) as a part of a seperate binary (eg dex2oat) which has it's own timeout logic to prevent
-// deadlocks.
-// This means we can be sure that the warning won't cause a deadlock.
-class ScopedFlock {
- public:
- ScopedFlock();
+class LockedFile;
+class LockedFileCloseNoFlush;
+// A scoped File object that calls Close without flushing.
+typedef std::unique_ptr<LockedFile, LockedFileCloseNoFlush> ScopedFlock;
+
+class LockedFile : public unix_file::FdFile {
+ public:
// Attempts to acquire an exclusive file lock (see flock(2)) on the file
// at filename, and blocks until it can do so.
//
- // Returns true if the lock could be acquired, or false if an error occurred.
// It is an error if its inode changed (usually due to a new file being
// created at the same path) between attempts to lock it. In blocking mode,
// locking will be retried if the file changed. In non-blocking mode, false
// is returned and no attempt is made to re-acquire the lock.
//
- // The argument `flush_on_close` controls whether or not the file
- // will be explicitly flushed before close.
- //
// The file is opened with the provided flags.
- bool Init(const char* filename,
- int flags,
- bool block,
- bool flush_on_close,
- std::string* error_msg);
- // Calls Init(filename, flags, block, true, error_msg);
- bool Init(const char* filename, int flags, bool block, std::string* error_msg);
- // Calls Init(filename, O_CREAT | O_RDWR, true, errror_msg)
- bool Init(const char* filename, std::string* error_msg);
+ static ScopedFlock Open(const char* filename, int flags, bool block,
+ std::string* error_msg);
+
+ // Calls Open(filename, O_CREAT | O_RDWR, true, error_msg)
+ static ScopedFlock Open(const char* filename, std::string* error_msg);
+
// Attempt to acquire an exclusive file lock (see flock(2)) on 'file'.
// Returns true if the lock could be acquired or false if an error
// occured.
- bool Init(File* file, std::string* error_msg);
+ static ScopedFlock DupOf(const int fd, const std::string& path,
+ const bool read_only_mode, std::string* error_message);
- // Returns the (locked) file associated with this instance.
- File* GetFile() const;
-
- // Returns whether a file is held.
- bool HasFile();
-
- ~ScopedFlock();
+ // Release a lock held on this file, if any.
+ void ReleaseLock();
private:
- std::unique_ptr<File> file_;
- bool flush_on_close_;
- DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
+ // Constructors should not be invoked directly; use one of the factory
+ // methods instead.
+ explicit LockedFile(FdFile&& other) : FdFile(std::move(other)) {
+ }
+
+ // Constructors should not be invoked directly; use one of the factory
+ // methods instead.
+ LockedFile(int fd, const std::string& path, bool check_usage, bool read_only_mode)
+ : FdFile(fd, path, check_usage, read_only_mode) {
+ }
+};
+
+class LockedFileCloseNoFlush {
+ public:
+ void operator()(LockedFile* ptr) {
+ ptr->ReleaseLock();
+ UNUSED(ptr->Close());
+
+ delete ptr;
+ }
};
} // namespace art
diff --git a/runtime/base/scoped_flock_test.cc b/runtime/base/scoped_flock_test.cc
index 1fa7a12..1b6caaf 100644
--- a/runtime/base/scoped_flock_test.cc
+++ b/runtime/base/scoped_flock_test.cc
@@ -30,11 +30,33 @@
// to each other, so attempting to query locks set by flock using
// using fcntl(,F_GETLK,) will not work. see kernel doc at
// Documentation/filesystems/locks.txt.
- ScopedFlock file_lock;
- ASSERT_TRUE(file_lock.Init(scratch_file.GetFilename().c_str(),
- &error_msg));
+ {
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
- ASSERT_FALSE(file_lock.Init("/guaranteed/not/to/exist", &error_msg));
+ // Attempt to acquire a second lock on the same file. This must fail.
+ ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ O_RDONLY,
+ /* block */ false,
+ &error_msg);
+ ASSERT_TRUE(second_lock.get() == nullptr);
+ ASSERT_TRUE(!error_msg.empty());
+ }
+
+ {
+ // Attempt to reacquire the lock once the first lock has been released;
+ // this must succeed.
+ ScopedFlock file_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() != nullptr);
+ }
+
+ {
+ ScopedFlock file_lock = LockedFile::Open("/will/not/exist",
+ &error_msg);
+ ASSERT_TRUE(file_lock.get() == nullptr);
+ }
}
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 88a5a13..01fc9ce 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -136,7 +136,7 @@
// Rely on the fact that the methods are contiguous to determine the index of the method in
// the slice.
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
- reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size;
+ reinterpret_cast<uintptr_t>(&virtual_methods[0])) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
mirror::ObjectArray<mirror::Class>* declared_exceptions =
proxy_class->GetProxyThrows()->Get(throws_index);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 9da2876..1bf9285 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -482,21 +482,22 @@
bool validate_oat_file,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image_lock;
// Should this be a RDWR lock? This is only a defensive measure, as at
// this point the image should exist.
// However, only the zygote can write into the global dalvik-cache, so
// restrict to zygote processes, or any process that isn't using
// /data/dalvik-cache (which we assume to be allowed to write there).
const bool rw_lock = is_zygote || !is_global_cache;
- image_lock.Init(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
- error_msg);
+
+ // Note that we must not use the file descriptor associated with
+ // the locked file to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image = LockedFile::Open(image_filename.c_str(),
+ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
+ true /* block */,
+ error_msg);
+
VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
<< image_location;
// If we are in /system we can assume the image is good. We can also
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index a67fb38..580be04 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -165,18 +165,20 @@
bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_invalid) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_RDWR | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let to the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
ProfileLoadSatus status = LoadInternal(fd, &error);
if (status == kProfileLoadSuccess) {
@@ -187,7 +189,7 @@
((status == kProfileLoadVersionMismatch) || (status == kProfileLoadBadData))) {
LOG(WARNING) << "Clearing bad or obsolete profile data from file "
<< filename << ": " << error;
- if (flock.GetFile()->ClearContent()) {
+ if (profile_file->ClearContent()) {
return true;
} else {
PLOG(WARNING) << "Could not clear profile file: " << filename;
@@ -201,21 +203,22 @@
bool ProfileCompilationInfo::Save(const std::string& filename, uint64_t* bytes_written) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- ScopedFlock flock;
std::string error;
int flags = O_WRONLY | O_NOFOLLOW | O_CLOEXEC;
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let to the kernel decide when to write to disk.
- if (!flock.Init(filename.c_str(), flags, /*block*/false, /*flush_on_close*/false, &error)) {
+ ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
+ /*block*/false, &error);
+ if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
}
- int fd = flock.GetFile()->Fd();
+ int fd = profile_file->Fd();
// We need to clear the data because we don't support appending to the profiles yet.
- if (!flock.GetFile()->ClearContent()) {
+ if (!profile_file->ClearContent()) {
PLOG(WARNING) << "Could not clear profile file: " << filename;
return false;
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index c8d4557..12baf38 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -127,11 +127,9 @@
}
inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetDirectMethodsStartOffset(),
- GetVirtualMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetDirectMethodsStartOffset(),
+ GetVirtualMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -141,11 +139,9 @@
}
inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetDirectMethodsStartOffset(),
- GetCopiedMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetDirectMethodsStartOffset(),
+ GetCopiedMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) {
@@ -155,11 +151,9 @@
inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(
PointerSize pointer_size) {
- return ArraySlice<ArtMethod>(GetMethodsPtr(),
- GetVirtualMethodsStartOffset(),
- GetCopiedMethodsStartOffset(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetVirtualMethodsStartOffset(),
+ GetCopiedMethodsStartOffset());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -169,12 +163,9 @@
}
inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(PointerSize pointer_size) {
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- GetVirtualMethodsStartOffset(),
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size,
+ GetVirtualMethodsStartOffset(),
+ NumMethods());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -184,12 +175,7 @@
}
inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(PointerSize pointer_size) {
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- GetCopiedMethodsStartOffset(),
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, GetCopiedMethodsStartOffset(), NumMethods());
}
inline LengthPrefixedArray<ArtMethod>* Class::GetMethodsPtr() {
@@ -200,14 +186,28 @@
template<VerifyObjectFlags kVerifyFlags>
inline ArraySlice<ArtMethod> Class::GetMethodsSlice(PointerSize pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- return ArraySlice<ArtMethod>(methods,
- 0,
- NumMethods(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, 0, NumMethods());
}
+inline ArraySlice<ArtMethod> Class::GetMethodsSliceRangeUnchecked(PointerSize pointer_size,
+ uint32_t start_offset,
+ uint32_t end_offset) {
+ DCHECK_LE(start_offset, end_offset);
+ DCHECK_LE(end_offset, NumMethods());
+ uint32_t size = end_offset - start_offset;
+ if (size == 0u) {
+ return ArraySlice<ArtMethod>();
+ }
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ DCHECK(methods != nullptr);
+ DCHECK_LE(end_offset, methods->size());
+ size_t method_size = ArtMethod::Size(pointer_size);
+ size_t method_alignment = ArtMethod::Alignment(pointer_size);
+ ArraySlice<ArtMethod> slice(&methods->At(0u, method_size, method_alignment),
+ methods->size(),
+ method_size);
+ return slice.SubArray(start_offset, size);
+}
inline uint32_t Class::NumMethods() {
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
@@ -216,12 +216,12 @@
inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetDirectMethodsSliceUnchecked(pointer_size).At(i);
+ return &GetDirectMethodsSliceUnchecked(pointer_size)[i];
}
inline ArtMethod* Class::GetDirectMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetDirectMethodsSlice(pointer_size).At(i);
+ return &GetDirectMethodsSlice(pointer_size)[i];
}
inline void Class::SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -264,7 +264,7 @@
inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return &GetVirtualMethodsSliceUnchecked(pointer_size).At(i);
+ return &GetVirtualMethodsSliceUnchecked(pointer_size)[i];
}
template<VerifyObjectFlags kVerifyFlags,
@@ -944,38 +944,36 @@
}
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetDirectMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDirectMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredMethods(
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethods(
PointerSize pointer_size) {
- return GetDeclaredMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDeclaredMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredVirtualMethods(
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethods(
PointerSize pointer_size) {
- return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(
+inline ArraySlice<ArtMethod> Class::GetVirtualMethods(
PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetVirtualMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetVirtualMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetCopiedMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return GetCopiedMethodsSliceUnchecked(pointer_size).AsRange();
+ return GetCopiedMethodsSliceUnchecked(pointer_size);
}
-inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(PointerSize pointer_size) {
+inline ArraySlice<ArtMethod> Class::GetMethods(PointerSize pointer_size) {
CheckPointerSize(pointer_size);
- return MakeIterationRangeFromLengthPrefixedArray(GetMethodsPtr(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetMethodsSliceRangeUnchecked(pointer_size, 0u, NumMethods());
}
inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 913ab79..61d6e05 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -716,7 +716,7 @@
// Also updates the dex_cache_strings_ variable from new_dex_cache.
void SetDexCache(ObjPtr<DexCache> new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
@@ -726,7 +726,7 @@
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
@@ -763,7 +763,7 @@
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -784,7 +784,7 @@
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -792,14 +792,14 @@
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethods(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethods(
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1347,6 +1347,11 @@
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSliceRangeUnchecked(PointerSize pointer_size,
+ uint32_t start_offset,
+ uint32_t end_offset)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(ObjPtr<Class> access_to,
ArtField* field,
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 2e2e8c3..4820feb 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -141,8 +141,8 @@
OatFileAssistant::~OatFileAssistant() {
// Clean up the lock file.
- if (flock_.HasFile()) {
- unlink(flock_.GetFile()->GetPath().c_str());
+ if (flock_.get() != nullptr) {
+ unlink(flock_->GetPath().c_str());
}
}
@@ -165,7 +165,7 @@
bool OatFileAssistant::Lock(std::string* error_msg) {
CHECK(error_msg != nullptr);
- CHECK(!flock_.HasFile()) << "OatFileAssistant::Lock already acquired";
+ CHECK(flock_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
// Note the lock will only succeed for secondary dex files and in test
// environment.
@@ -179,7 +179,8 @@
// to generate oat files anyway.
std::string lock_file_name = dex_location_ + "." + GetInstructionSetString(isa_) + ".flock";
- if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
+ flock_ = LockedFile::Open(lock_file_name.c_str(), error_msg);
+ if (flock_.get() == nullptr) {
unlink(lock_file_name.c_str());
return false;
}
diff --git a/runtime/openjdkjvmti/art_jvmti.h b/runtime/openjdkjvmti/art_jvmti.h
index af85fb0..369b2d7 100644
--- a/runtime/openjdkjvmti/art_jvmti.h
+++ b/runtime/openjdkjvmti/art_jvmti.h
@@ -37,7 +37,6 @@
#include <jni.h>
-#include "base/array_slice.h"
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index ed54cd1..cd078b6 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -37,6 +37,7 @@
#include <unordered_set>
#include "art_jvmti.h"
+#include "base/array_ref.h"
#include "base/macros.h"
#include "class_table-inl.h"
#include "class_linker.h"
@@ -83,7 +84,7 @@
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Make the mmap
std::string error_msg;
- art::ArraySlice<const unsigned char> final_data(final_dex_data, final_len);
+ art::ArrayRef<const unsigned char> final_data(final_dex_data, final_len);
std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
final_data,
&error_msg));
diff --git a/runtime/openjdkjvmti/ti_class_definition.cc b/runtime/openjdkjvmti/ti_class_definition.cc
index 180895b..8e8ab19 100644
--- a/runtime/openjdkjvmti/ti_class_definition.cc
+++ b/runtime/openjdkjvmti/ti_class_definition.cc
@@ -56,7 +56,7 @@
// be that agents were removed in the mean-time so we still have a different dex file. The dex
// checksum means this is likely to be fairly fast.
return static_cast<jint>(original_dex_file_.size()) != dex_len_ ||
- memcmp(&original_dex_file_.At(0), dex_data_.get(), dex_len_) != 0;
+ memcmp(original_dex_file_.data(), dex_data_.get(), dex_len_) != 0;
}
jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
@@ -152,12 +152,12 @@
unsigned char* original_data_memory = nullptr;
res = CopyDataIntoJvmtiBuffer(env, dex_data_.get(), dex_len_, &original_data_memory);
original_dex_file_memory_ = MakeJvmtiUniquePtr(env, original_data_memory);
- original_dex_file_ = art::ArraySlice<const unsigned char>(original_data_memory, dex_len_);
+ original_dex_file_ = art::ArrayRef<const unsigned char>(original_data_memory, dex_len_);
} else {
// We know that we have been redefined at least once (there is an original_dex_file set in
// the class) so we can just use the current dex file directly.
const art::DexFile& dex_file = m_klass->GetDexFile();
- original_dex_file_ = art::ArraySlice<const unsigned char>(dex_file.Begin(), dex_file.Size());
+ original_dex_file_ = art::ArrayRef<const unsigned char>(dex_file.Begin(), dex_file.Size());
}
return res;
}
@@ -168,7 +168,7 @@
return res;
}
unsigned char* new_data = nullptr;
- original_dex_file_ = art::ArraySlice<const unsigned char>(def.class_bytes, def.class_byte_count);
+ original_dex_file_ = art::ArrayRef<const unsigned char>(def.class_bytes, def.class_byte_count);
redefined_ = true;
dex_len_ = def.class_byte_count;
res = CopyDataIntoJvmtiBuffer(env, def.class_bytes, def.class_byte_count, /*out*/ &new_data);
diff --git a/runtime/openjdkjvmti/ti_class_definition.h b/runtime/openjdkjvmti/ti_class_definition.h
index 43d0c3f..2c268dd 100644
--- a/runtime/openjdkjvmti/ti_class_definition.h
+++ b/runtime/openjdkjvmti/ti_class_definition.h
@@ -34,6 +34,8 @@
#include "art_jvmti.h"
+#include "base/array_ref.h"
+
namespace openjdkjvmti {
// A struct that stores data needed for redefining/transforming classes. This structure should only
@@ -68,12 +70,12 @@
}
}
- art::ArraySlice<const unsigned char> GetNewOriginalDexFile() const {
+ art::ArrayRef<const unsigned char> GetNewOriginalDexFile() const {
DCHECK(IsInitialized());
if (redefined_) {
return original_dex_file_;
} else {
- return art::ArraySlice<const unsigned char>();
+ return art::ArrayRef<const unsigned char>();
}
}
@@ -103,9 +105,9 @@
return protection_domain_;
}
- art::ArraySlice<const unsigned char> GetDexData() const {
+ art::ArrayRef<const unsigned char> GetDexData() const {
DCHECK(IsInitialized());
- return art::ArraySlice<const unsigned char>(dex_data_.get(), dex_len_);
+ return art::ArrayRef<const unsigned char>(dex_data_.get(), dex_len_);
}
private:
@@ -118,7 +120,7 @@
jint dex_len_;
JvmtiUniquePtr<unsigned char> dex_data_;
JvmtiUniquePtr<unsigned char> original_dex_file_memory_;
- art::ArraySlice<const unsigned char> original_dex_file_;
+ art::ArrayRef<const unsigned char> original_dex_file_;
bool redefined_;
DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index 5544dde..205046c 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -37,7 +37,6 @@
#include "art_field-inl.h"
#include "art_jvmti.h"
-#include "base/array_slice.h"
#include "base/logging.h"
#include "dex_file.h"
#include "dex_file_types.h"
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index b382a3e..341de0d 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -38,7 +38,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "art_jvmti.h"
-#include "base/array_slice.h"
+#include "base/array_ref.h"
#include "base/logging.h"
#include "class_linker-inl.h"
#include "debugger.h"
@@ -265,7 +265,7 @@
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- art::ArraySlice<const unsigned char> data,
+ art::ArrayRef<const unsigned char> data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
@@ -278,7 +278,7 @@
if (map == nullptr) {
return map;
}
- memcpy(map->Begin(), &data.At(0), data.size());
+ memcpy(map->Begin(), data.data(), data.size());
// Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
// programs from corrupting it.
map->Protect(PROT_READ);
@@ -290,7 +290,7 @@
jclass klass,
const art::DexFile* redefined_dex_file,
const char* class_sig,
- art::ArraySlice<const unsigned char> orig_dex_file) :
+ art::ArrayRef<const unsigned char> orig_dex_file) :
driver_(driver),
klass_(klass),
dex_file_(redefined_dex_file),
@@ -493,7 +493,7 @@
if (original_dex_file_.size() != 0) {
return art::mirror::ByteArray::AllocateAndFill(
driver_->self_,
- reinterpret_cast<const signed char*>(&original_dex_file_.At(0)),
+ reinterpret_cast<const signed char*>(original_dex_file_.data()),
original_dex_file_.size());
}
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5e31627..ec4a8b2 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -38,7 +38,7 @@
#include "art_jvmti.h"
#include "art_method.h"
-#include "base/array_slice.h"
+#include "base/array_ref.h"
#include "class_linker.h"
#include "dex_file.h"
#include "gc_root-inl.h"
@@ -95,7 +95,7 @@
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- art::ArraySlice<const unsigned char> data,
+ art::ArrayRef<const unsigned char> data,
std::string* error_msg);
private:
@@ -105,7 +105,7 @@
jclass klass,
const art::DexFile* redefined_dex_file,
const char* class_sig,
- art::ArraySlice<const unsigned char> orig_dex_file)
+ art::ArrayRef<const unsigned char> orig_dex_file)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS so we can unlock the class in the destructor.
@@ -205,7 +205,7 @@
jclass klass_;
std::unique_ptr<const art::DexFile> dex_file_;
std::string class_sig_;
- art::ArraySlice<const unsigned char> original_dex_file_;
+ art::ArrayRef<const unsigned char> original_dex_file_;
};
jvmtiError result_;
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 15d8dd0..1d7f137 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -35,6 +35,7 @@
#include "transform.h"
#include "art_method.h"
+#include "base/array_ref.h"
#include "class_linker.h"
#include "dex_file.h"
#include "dex_file_types.h"
@@ -70,7 +71,7 @@
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
- art::ArraySlice<const unsigned char> dex_data = def.GetDexData();
+ art::ArrayRef<const unsigned char> dex_data = def.GetDexData();
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
@@ -79,7 +80,7 @@
def.GetName().c_str(),
def.GetProtectionDomain(),
static_cast<jint>(dex_data.size()),
- &dex_data.At(0),
+ dex_data.data(),
/*out*/&new_len,
/*out*/&new_data);
def.SetNewDexData(env, new_len, new_data);
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 8843dc8..b89d457 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -715,5 +715,11 @@
"tests": ["059-finalizer-throw", "063-process-manager"],
"description": [ "Tests that take too long on target with gcstress and debug" ],
"variant": "gcstress & target & debug"
+ },
+ {
+ "tests": ["905-object-free"],
+ "description": [ "Flake on gcstress" ],
+ "bug": "b/62562923",
+ "variant": "gcstress & jit & target"
}
]
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 654fb06..baf7600 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -324,10 +324,14 @@
# ASAN (host) configurations.
+ # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
+ # as our build tools leak. b/37751350
+
'art-gtest-asan': {
'make' : 'test-art-host-gtest',
'env': {
- 'SANITIZE_HOST' : 'address'
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
}
},
'art-asan': {
@@ -335,7 +339,8 @@
'--optimizing',
'--jit'],
'env': {
- 'SANITIZE_HOST' : 'address'
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
}
},
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index 497db1c..515a391 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <iostream>
#include <fstream>
+#include <memory>
#include <stdio.h>
#include <sstream>
#include <strstream>
@@ -87,6 +88,142 @@
return ReadIntoBuffer(data->out_temp_dex, dex);
}
+class ScopedThreadInfo {
+ public:
+ ScopedThreadInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jthread thread)
+ : jvmtienv_(jvmtienv), env_(env), free_name_(false) {
+ memset(&info_, 0, sizeof(info_));
+ if (thread == nullptr) {
+ info_.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmtienv->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+ info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+ } else {
+ free_name_ = true;
+ }
+ }
+
+ ~ScopedThreadInfo() {
+ if (free_name_) {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+ }
+ env_->DeleteLocalRef(info_.thread_group);
+ env_->DeleteLocalRef(info_.context_class_loader);
+ }
+
+ const char* GetName() const {
+ return info_.name;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ bool free_name_;
+ jvmtiThreadInfo info_;
+};
+
+class ScopedClassInfo {
+ public:
+ ScopedClassInfo(jvmtiEnv* jvmtienv, jclass c)
+ : jvmtienv_(jvmtienv),
+ class_(c),
+ name_(nullptr),
+ generic_(nullptr) {}
+
+ ~ScopedClassInfo() {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE;
+ }
+
+ jclass GetClass() const {
+ return class_;
+ }
+ const char* GetName() const {
+ return name_;
+ }
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ jclass class_;
+ char* name_;
+ char* generic_;
+};
+
+class ScopedMethodInfo {
+ public:
+ ScopedMethodInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jmethodID m)
+ : jvmtienv_(jvmtienv),
+ env_(env),
+ method_(m),
+ declaring_class_(nullptr),
+ class_info_(nullptr),
+ name_(nullptr),
+ signature_(nullptr),
+ generic_(nullptr) {}
+
+ ~ScopedMethodInfo() {
+ env_->DeleteLocalRef(declaring_class_);
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init() {
+ if (jvmtienv_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+ return class_info_->Init() &&
+ (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetSignature() const {
+ return signature_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ jmethodID method_;
+ jclass declaring_class_;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_;
+ char* signature_;
+ char* generic_;
+
+ friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
+};
+
+std::ostream& operator<<(std::ostream &os, const ScopedMethodInfo* m) {
+ return os << *m;
+}
+
+std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m) {
+ return os << m.GetDeclaringClassInfo().GetName() << "->" << m.GetName() << m.GetSignature();
+}
+
static void doJvmtiMethodBind(jvmtiEnv* jvmtienv,
JNIEnv* env,
jthread thread,
@@ -94,38 +231,14 @@
void* address,
/*out*/void** out_address) {
*out_address = address;
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
+ ScopedThreadInfo thread_info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
- return;
- }
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
- }
- LOG(INFO) << "Loading native method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
- return;
+ LOG(INFO) << "Loading native method \"" << method_info << "\". Thread is "
+ << thread_info.GetName();
}
static std::string GetName(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jobject obj) {
@@ -197,80 +310,32 @@
jmethodID m,
jboolean was_popped_by_exception,
jvalue val) {
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- // LOG(WARNING) << "Unable to get thread info!";
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
- return;
- }
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
- }
- std::string type(fsig);
+ std::string type(method_info.GetSignature());
type = type.substr(type.find(")") + 1);
std::string out_val(was_popped_by_exception ? "" : GetValOf(jvmtienv, env, type, val));
- LOG(INFO) << "Leaving method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"." << std::endl
+ LOG(INFO) << "Leaving method \"" << method_info << "\". Thread is \"" << info.GetName() << "\"."
+ << std::endl
<< " Cause: " << (was_popped_by_exception ? "exception" : "return ")
<< out_val << ".";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
}
void JNICALL MethodEntryHook(jvmtiEnv* jvmtienv,
JNIEnv* env,
jthread thread,
jmethodID m) {
- jvmtiThreadInfo info;
- if (thread == nullptr) {
- info.name = const_cast<char*>("<NULLPTR>");
- } else if (jvmtienv->GetThreadInfo(thread, &info) != JVMTI_ERROR_NONE) {
- info.name = const_cast<char*>("<UNKNOWN THREAD>");
- }
- char *fname, *fsig, *fgen;
- char *cname, *cgen;
- jclass klass = nullptr;
- if (jvmtienv->GetMethodDeclaringClass(m, &klass) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method declaring class!";
+ ScopedThreadInfo info(jvmtienv, env, thread);
+ ScopedMethodInfo method_info(jvmtienv, env, m);
+ if (!method_info.Init()) {
+ LOG(ERROR) << "Unable to get method info!";
return;
}
- if (jvmtienv->GetMethodName(m, &fname, &fsig, &fgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get method name!";
- env->DeleteLocalRef(klass);
- return;
- }
- if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
- LOG(ERROR) << "Unable to get class name!";
- env->DeleteLocalRef(klass);
- return;
- }
- LOG(INFO) << "Entering method \"" << cname << "->" << fname << fsig << "\". Thread is \""
- << info.name << "\"";
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fname));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fsig));
- jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(fgen));
- env->DeleteLocalRef(klass);
+ LOG(INFO) << "Entering method \"" << method_info << "\". Thread is \"" << info.GetName() << "\"";
}
// The hook we are using.
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 0c58585..f340fa1 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -1,6 +1,6 @@
/*
* This file contains expectations for ART's buildbot. The purpose of this file is
- * to temporary and quickly list failing tests and not break the bots, until the
+ * to temporarily list failing tests and not break the bots, until the
* libcore expectation files get properly updated. The script that uses this file
* is art/tools/run-libcore-tests.sh.
*
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
new file mode 100644
index 0000000..b4c6f2b
--- /dev/null
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -0,0 +1,16 @@
+/*
+ * This file contains expectations for ART's buildbot when running gcstress in debug mode.
+ * The script that uses this file is art/tools/run-libcore-tests.sh.
+ */
+
+[
+{
+ description: "Timeouts on target with gcstress and debug.",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.icu.TransliteratorTest#testAll",
+ "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
+ "libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
+ "org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
+}
+]
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index f9f3754..8b3df3a 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -103,6 +103,9 @@
# "org.apache.harmony.security"
vogar_args=$@
+gcstress=false
+debug=false
+
while true; do
if [[ "$1" == "--mode=device" ]]; then
vogar_args="$vogar_args --device-dir=/data/local/tmp"
@@ -125,6 +128,10 @@
# Remove the --debug from the arguments.
vogar_args=${vogar_args/$1}
vogar_args="$vogar_args --vm-arg -XXlib:libartd.so"
+ debug=true
+ shift
+ elif [[ "$1" == "-Xgc:gcstress" ]]; then
+ gcstress=true
shift
elif [[ "$1" == "" ]]; then
break
@@ -151,6 +158,11 @@
fi
vogar_args="$vogar_args --vm-arg -Xusejit:$use_jit"
+# gcstress and debug may lead to timeouts, so we need a dedicated expectations file for it.
+if [[ $gcstress == "true" && $debug == "true" ]]; then
+ expectations="$expectations --expectations art/tools/libcore_gcstress_debug_failures.txt"
+fi
+
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"