Merge "ART: Break up x86 disassembler main function"
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0a3f830..d39f1c7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -224,31 +224,35 @@
current_block_ = nullptr;
}
-static bool ShouldSkipCompilation(const CompilerDriver& compiler_driver,
- const DexCompilationUnit& dex_compilation_unit,
- size_t number_of_dex_instructions,
- size_t number_of_blocks ATTRIBUTE_UNUSED,
- size_t number_of_branches) {
- const CompilerOptions& compiler_options = compiler_driver.GetCompilerOptions();
+void HGraphBuilder::MaybeRecordStat(MethodCompilationStat compilation_stat) {
+ if (compilation_stats_ != nullptr) {
+ compilation_stats_->RecordStat(compilation_stat);
+ }
+}
+
+bool HGraphBuilder::SkipCompilation(size_t number_of_dex_instructions,
+ size_t number_of_blocks ATTRIBUTE_UNUSED,
+ size_t number_of_branches) {
+ const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerOptions::kEverything) {
return false;
}
if (compiler_options.IsHugeMethod(number_of_dex_instructions)) {
- LOG(INFO) << "Skip compilation of huge method "
- << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
- *dex_compilation_unit.GetDexFile())
- << ": " << number_of_dex_instructions << " dex instructions";
+ VLOG(compiler) << "Skip compilation of huge method "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << ": " << number_of_dex_instructions << " dex instructions";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledHugeMethod);
return true;
}
// If it's large and contains no branches, it's likely to be machine generated initialization.
if (compiler_options.IsLargeMethod(number_of_dex_instructions) && (number_of_branches == 0)) {
- LOG(INFO) << "Skip compilation of large method with no branch "
- << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
- *dex_compilation_unit.GetDexFile())
- << ": " << number_of_dex_instructions << " dex instructions";
+ VLOG(compiler) << "Skip compilation of large method with no branch "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << ": " << number_of_dex_instructions << " dex instructions";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledLargeMethodNoBranches);
return true;
}
@@ -283,14 +287,9 @@
code_ptr, code_end, &number_of_dex_instructions, &number_of_blocks, &number_of_branches);
// Note that the compiler driver is null when unit testing.
- if (compiler_driver_ != nullptr) {
- if (ShouldSkipCompilation(*compiler_driver_,
- *dex_compilation_unit_,
- number_of_dex_instructions,
- number_of_blocks,
- number_of_branches)) {
- return nullptr;
- }
+ if ((compiler_driver_ != nullptr)
+ && SkipCompilation(number_of_dex_instructions, number_of_blocks, number_of_branches)) {
+ return nullptr;
}
// Also create blocks for catch handlers.
@@ -319,7 +318,9 @@
// Update the current block if dex_pc starts a new block.
MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) {
+ return nullptr;
+ }
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -593,8 +594,9 @@
if (!compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method)) {
- LOG(INFO) << "Did not compile " << PrettyMethod(method_idx, *dex_file_)
- << " because a method call could not be resolved";
+ VLOG(compiler) << "Did not compile " << PrettyMethod(method_idx, *dex_file_)
+ << " because a method call could not be resolved";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
return false;
}
DCHECK(optimized_invoke_type != kSuper);
@@ -636,6 +638,7 @@
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
<< " at " << dex_pc;
// We do not implement non sequential register pair.
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledNonSequentialRegPair);
return false;
}
HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
@@ -664,9 +667,11 @@
compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa)));
if (resolved_field.Get() == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
return false;
}
if (resolved_field->IsVolatile()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
return false;
}
@@ -721,10 +726,12 @@
&is_initialized,
&field_type);
if (!fast_path) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
return false;
}
if (is_volatile) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
return false;
}
@@ -947,6 +954,7 @@
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
&type_known_final, &type_known_abstract, &is_referrers_class);
if (!can_access) {
+    MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccessType);
return false;
}
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
@@ -967,7 +975,7 @@
return true;
}
-bool HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
+void HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
SwitchTable table(instruction, dex_pc, false);
// Value to test against.
@@ -984,10 +992,9 @@
BuildSwitchCaseHelper(instruction, i, i == num_entries, table, value, starting_key + i - 1,
table.GetEntryAt(i), dex_pc);
}
- return true;
}
-bool HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
+void HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
SwitchTable table(instruction, dex_pc, true);
// Value to test against.
@@ -1001,7 +1008,6 @@
BuildSwitchCaseHelper(instruction, i, i == static_cast<size_t>(num_entries) - 1, table, value,
table.GetEntryAt(i), table.GetEntryAt(i + num_entries), dex_pc);
}
- return true;
}
void HGraphBuilder::BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
@@ -1928,6 +1934,7 @@
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
&type_known_final, &type_known_abstract, &is_referrers_class);
if (!can_access) {
+    MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccessType);
return false;
}
current_block_->AddInstruction(
@@ -1989,20 +1996,21 @@
}
case Instruction::PACKED_SWITCH: {
- if (!BuildPackedSwitch(instruction, dex_pc)) {
- return false;
- }
+ BuildPackedSwitch(instruction, dex_pc);
break;
}
case Instruction::SPARSE_SWITCH: {
- if (!BuildSparseSwitch(instruction, dex_pc)) {
- return false;
- }
+ BuildSparseSwitch(instruction, dex_pc);
break;
}
default:
+ VLOG(compiler) << "Did not compile "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << " because of unhandled instruction "
+ << instruction.Name();
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnhandledInstruction);
return false;
}
return true;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 73c2f50..75c8634 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -21,6 +21,7 @@
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
+#include "optimizing_compiler_stats.h"
#include "primitive.h"
#include "utils/arena_object.h"
#include "utils/growable_array.h"
@@ -36,7 +37,8 @@
HGraphBuilder(ArenaAllocator* arena,
DexCompilationUnit* dex_compilation_unit,
const DexFile* dex_file,
- CompilerDriver* driver)
+ CompilerDriver* driver,
+ OptimizingCompilerStats* compiler_stats)
: arena_(arena),
branch_targets_(arena, 0),
locals_(arena, 0),
@@ -51,7 +53,8 @@
compiler_driver_(driver),
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
- latest_result_(nullptr) {}
+ latest_result_(nullptr),
+ compilation_stats_(compiler_stats) {}
// Only for unit testing.
HGraphBuilder(ArenaAllocator* arena, Primitive::Type return_type = Primitive::kPrimInt)
@@ -69,7 +72,8 @@
compiler_driver_(nullptr),
return_type_(return_type),
code_start_(nullptr),
- latest_result_(nullptr) {}
+ latest_result_(nullptr),
+ compilation_stats_(nullptr) {}
HGraph* BuildGraph(const DexFile::CodeItem& code);
@@ -205,16 +209,22 @@
uint32_t dex_pc);
// Builds an instruction sequence for a packed switch statement.
- bool BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc);
+ void BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc);
// Builds an instruction sequence for a sparse switch statement.
- bool BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc);
+ void BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc);
void BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
bool is_last_case, const SwitchTable& table,
HInstruction* value, int32_t case_value_int,
int32_t target_offset, uint32_t dex_pc);
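+
+  // Returns whether compilation of this method should be skipped, based on
+  // the compiler filter and the method's size and number of branches.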
+ bool SkipCompilation(size_t number_of_dex_instructions,
+ size_t number_of_blocks,
+ size_t number_of_branches);
+
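+  // Records the given compilation statistic, if statistics were requested
+  // (compilation_stats_ is null when unit testing).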
+ void MaybeRecordStat(MethodCompilationStat compilation_stat);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -245,6 +255,8 @@
// used by move-result instructions.
HInstruction* latest_result_;
+ OptimizingCompilerStats* compilation_stats_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 89a0cf9..d47217f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -121,9 +121,8 @@
// Whether we should run any optimization or register allocation. If false, will
// just run the code generation after the graph was built.
const bool run_optimizations_;
- mutable AtomicInteger total_compiled_methods_;
- mutable AtomicInteger unoptimized_compiled_methods_;
- mutable AtomicInteger optimized_compiled_methods_;
+
+ mutable OptimizingCompilerStats compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -136,24 +135,14 @@
: Compiler(driver, kMaximumCompilationTimeBeforeWarning),
run_optimizations_(
driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
- total_compiled_methods_(0),
- unoptimized_compiled_methods_(0),
- optimized_compiled_methods_(0) {
+ compilation_stats_() {
if (kIsVisualizerEnabled) {
visualizer_output_.reset(new std::ofstream("art.cfg"));
}
}
OptimizingCompiler::~OptimizingCompiler() {
- if (total_compiled_methods_ == 0) {
- LOG(INFO) << "Did not compile any method.";
- } else {
- size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
- size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
- LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
- << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
- << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
- }
+ compilation_stats_.Log();
}
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
@@ -257,7 +246,7 @@
jobject class_loader,
const DexFile& dex_file) const {
UNUSED(invoke_type);
- total_compiled_methods_++;
+ compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
// overflow checks) assume thumb2.
@@ -267,10 +256,12 @@
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -287,7 +278,10 @@
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file, GetCompilerDriver());
+ HGraphBuilder builder(&arena,
+ &dex_compilation_unit,
+                        &dex_file,
+                        GetCompilerDriver(),
+ &compilation_stats_);
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
@@ -298,6 +292,7 @@
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
if (codegen == nullptr) {
CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
@@ -307,13 +302,13 @@
CodeVectorAllocator allocator;
- if (run_optimizations_
- && CanOptimize(*code_item)
- && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ bool can_optimize = CanOptimize(*code_item);
+ bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
+ if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << PrettyMethod(method_idx, dex_file);
- optimized_compiled_methods_++;
if (!TryBuildingSsa(graph, dex_compilation_unit, visualizer)) {
// We could not transform the graph to SSA, bailout.
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
return nullptr;
}
RunOptimizations(graph, visualizer);
@@ -332,6 +327,7 @@
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledOptimized);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
@@ -344,7 +340,15 @@
UNREACHABLE();
} else {
VLOG(compiler) << "Compile baseline " << PrettyMethod(method_idx, dex_file);
- unoptimized_compiled_methods_++;
+
+ if (!run_optimizations_) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedDisabled);
+ } else if (!can_optimize) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
+ } else if (!can_allocate_registers) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
+ }
+
codegen->CompileBaseline(&allocator);
std::vector<uint8_t> mapping_table;
@@ -357,6 +361,7 @@
std::vector<uint8_t> gc_map;
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledBaseline);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index a415eca..d076fb5 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -24,6 +24,6 @@
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
-}
+} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
new file mode 100644
index 0000000..829982e
--- /dev/null
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+
+#include <sstream>
+#include <string>
+
+#include "atomic.h"
+
+namespace art {
+
+enum MethodCompilationStat {
+ kAttemptCompilation = 0,
+ kCompiledBaseline,
+ kCompiledOptimized,
+ kNotCompiledUnsupportedIsa,
+ kNotCompiledPathological,
+ kNotCompiledHugeMethod,
+ kNotCompiledLargeMethodNoBranches,
+ kNotCompiledCannotBuildSSA,
+ kNotCompiledNoCodegen,
+ kNotCompiledUnresolvedMethod,
+ kNotCompiledUnresolvedField,
+ kNotCompiledNonSequentialRegPair,
+ kNotCompiledVolatile,
+ kNotOptimizedTryCatch,
+ kNotOptimizedDisabled,
+  kNotCompiledCantAccessType,
+ kNotOptimizedRegisterAllocator,
+ kNotCompiledUnhandledInstruction,
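+  // Must remain last: used to size the counter array.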
+ kLastStat
+};
+
+class OptimizingCompilerStats {
+ public:
+ OptimizingCompilerStats() {}
+
+ void RecordStat(MethodCompilationStat stat) {
+ compile_stats_[stat]++;
+ }
+
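+  // Logs a summary: the number of compilation attempts, the share compiled
+  // baseline vs. optimized, and any non-zero bailout counters.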
+ void Log() const {
+ if (compile_stats_[kAttemptCompilation] == 0) {
+ LOG(INFO) << "Did not compile any method.";
+ } else {
+ size_t unoptimized_percent =
+ compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
+ size_t optimized_percent =
+ compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
+ std::ostringstream oss;
+ oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: "
+ << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, "
+ << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized.\n";
+ for (int i = 0; i < kLastStat; i++) {
+ if (compile_stats_[i] != 0) {
+ oss << PrintMethodCompilationStat(i) << ": " << compile_stats_[i] << "\n";
+ }
+ }
+ LOG(INFO) << oss.str();
+ }
+ }
+
+ private:
+ std::string PrintMethodCompilationStat(int stat) const {
+ switch (stat) {
+ case kAttemptCompilation : return "kAttemptCompilation";
+ case kCompiledBaseline : return "kCompiledBaseline";
+ case kCompiledOptimized : return "kCompiledOptimized";
+ case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
+ case kNotCompiledPathological : return "kNotCompiledPathological";
+ case kNotCompiledHugeMethod : return "kNotCompiledHugeMethod";
+ case kNotCompiledLargeMethodNoBranches : return "kNotCompiledLargeMethodNoBranches";
+ case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
+ case kNotCompiledNoCodegen : return "kNotCompiledNoCodegen";
+ case kNotCompiledUnresolvedMethod : return "kNotCompiledUnresolvedMethod";
+ case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
+ case kNotCompiledNonSequentialRegPair : return "kNotCompiledNonSequentialRegPair";
+ case kNotCompiledVolatile : return "kNotCompiledVolatile";
+ case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
+ case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
+      case kNotCompiledCantAccessType : return "kNotCompiledCantAccessType";
+ case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
+ case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
+ default: LOG(FATAL) << "invalid stat";
+ }
+ return "";
+ }
+
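+  // One atomic counter per stat, since methods are compiled concurrently by
+  // multiple compiler driver threads.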
+ AtomicInteger compile_stats_[kLastStat];
+
+ DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0ae54dc..66ea3ce 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -379,12 +379,17 @@
* +-------------------------+
*/
ENTRY art_quick_invoke_stub_internal
- push {r4, r9, r11, lr} @ spill regs
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr} @ spill regs
-    .cfi_adjust_cfa_offset 16
+    .cfi_adjust_cfa_offset 36
.cfi_rel_offset r4, 0
- .cfi_rel_offset r9, 4
- .cfi_rel_offset r11, 8
- .cfi_rel_offset lr, 12
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset r7, 12
+ .cfi_rel_offset r8, 16
+ .cfi_rel_offset r9, 20
+ .cfi_rel_offset r10, 24
+ .cfi_rel_offset r11, 28
+ .cfi_rel_offset lr, 32
mov r11, sp @ save the stack pointer
.cfi_def_cfa_register r11
@@ -401,10 +406,10 @@
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
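+    @ The five extra callee saves (r5-r8, r10) grow the spill area by 20
+    @ bytes, so the stub's stack arguments move from [r11, #24] / [r11, #28]
+    @ to [r11, #44] / [r11, #48].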
- ldr ip, [r11, #28] @ load fp register argument array pointer
+ ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
- ldr ip, [r11, #24] @ load core register argument array pointer
+ ldr ip, [r11, #44] @ load core register argument array pointer
mov r0, r4 @ restore method*
add ip, ip, #4 @ skip r0
ldm ip, {r1-r3} @ copy r1 - r3
@@ -419,14 +424,14 @@
mov sp, r11 @ restore the stack pointer
.cfi_def_cfa_register sp
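+    @ The result_is_float and result pointer arguments shift by the same 20
+    @ bytes, from [sp, #20] / [sp, #16] to [sp, #40] / [sp, #36].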
- ldr r4, [sp, #20] @ load result_is_float
- ldr r9, [sp, #16] @ load the result pointer
+ ldr r4, [sp, #40] @ load result_is_float
+ ldr r9, [sp, #36] @ load the result pointer
cmp r4, #0
ite eq
strdeq r0, [r9] @ store r0/r1 into result pointer
vstrne d0, [r9] @ store s0-s1/d0 into result pointer
- pop {r4, r9, r11, pc} @ restore spill regs
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} @ restore spill regs
END art_quick_invoke_stub_internal
/*
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4415935..6047bb0 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -499,7 +499,7 @@
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=6*8 // x4, x5, xSUSPEND, SP, LR & FP saved.
+SAVE_SIZE=15*8 // x4, x5, x20, x21, x22, x23, x24, x25, x26, x27, x28, xSUSPEND, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
@@ -515,6 +515,25 @@
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
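+    // Spill the remaining callee saves x20-x28 above the x4/x5/xSUSPEND/SP/
+    // LR/FP slots, matching the enlarged SAVE_SIZE layout above.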
+ str x28, [x10, #112]
+ .cfi_rel_offset x28, 112
+
+ stp x26, x27, [x10, #96]
+ .cfi_rel_offset x26, 96
+ .cfi_rel_offset x27, 104
+
+ stp x24, x25, [x10, #80]
+ .cfi_rel_offset x24, 80
+ .cfi_rel_offset x25, 88
+
+ stp x22, x23, [x10, #64]
+ .cfi_rel_offset x22, 64
+ .cfi_rel_offset x23, 72
+
+ stp x20, x21, [x10, #48]
+ .cfi_rel_offset x20, 48
+ .cfi_rel_offset x21, 56
+
stp x9, xSUSPEND, [x10, #32] // Save old stack pointer and xSUSPEND
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
@@ -573,6 +592,25 @@
.cfi_restore x4
.cfi_restore x5
+ ldr x28, [xFP, #112]
+ .cfi_restore x28
+
+ ldp x26, x27, [xFP, #96]
+ .cfi_restore x26
+ .cfi_restore x27
+
+ ldp x24, x25, [xFP, #80]
+ .cfi_restore x24
+ .cfi_restore x25
+
+ ldp x22, x23, [xFP, #64]
+ .cfi_restore x22
+ .cfi_restore x23
+
+ ldp x20, x21, [xFP, #48]
+ .cfi_restore x20
+ .cfi_restore x21
+
// Store result (w0/x0/s0/d0) appropriately, depending on resultType.
ldrb w10, [x5]
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 0bfa1ce..302b9f8 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -297,28 +297,34 @@
DEFINE_FUNCTION art_quick_invoke_stub
PUSH ebp // save ebp
PUSH ebx // save ebx
+ PUSH esi // save esi
+ PUSH edi // save edi
mov %esp, %ebp // copy value of stack pointer into base pointer
CFI_DEF_CFA_REGISTER(ebp)
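+    // Saving esi and edi pushes the incoming stack arguments 8 bytes further
+    // from ebp: the arg array size moves from 20(%ebp) to 28(%ebp), the arg
+    // array from 16(%ebp) to 24(%ebp), and method* from 12(%ebp) to 20(%ebp).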
- mov 20(%ebp), %ebx // get arg array size
- addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
- andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
- subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ mov 28(%ebp), %ebx // get arg array size
+ // reserve space for return addr, method*, ebx, ebp, esi, and edi in frame
+ addl LITERAL(36), %ebx
+ // align frame size to 16 bytes
+ andl LITERAL(0xFFFFFFF0), %ebx
+ subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
SETUP_GOT_NOSAVE ebx // clobbers ebx (harmless here)
lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
- pushl 20(%ebp) // push size of region to memcpy
- pushl 16(%ebp) // push arg array as source of memcpy
+ pushl 28(%ebp) // push size of region to memcpy
+ pushl 24(%ebp) // push arg array as source of memcpy
pushl %eax // push stack pointer as destination of memcpy
call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments to memcpy
movl LITERAL(0), (%esp) // store NULL for method*
- mov 12(%ebp), %eax // move method pointer into eax
+ mov 20(%ebp), %eax // move method pointer into eax
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
+ POP edi // pop edi
+ POP esi // pop esi
POP ebx // pop ebx
POP ebp // pop ebp
mov 20(%esp), %ecx // get result pointer
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 7f85ab7..5ae65db 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -487,15 +487,21 @@
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
+ PUSH rbx // Save native callee save rbx
+ PUSH r12 // Save native callee save r12
+ PUSH r13 // Save native callee save r13
+ PUSH r14 // Save native callee save r14
+ PUSH r15 // Save native callee save r15
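+    // The five extra callee-save pushes (rbx, r12-r15) add 40 bytes to the
+    // frame, so the reserve/remove constants below grow by 40 (60 to 100 and
+    // 32 to 72).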
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
- addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
- // r8 and r9 in frame.
- andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
- subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
- subq %rdx, %rsp // Reserve stack space for argument array.
+ addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8, r9, rbx, r12, r13, r14, and r15 in frame.
+ andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
+ subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
+ // r13, r14, and r15
+ subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
@@ -503,15 +509,15 @@
movl LITERAL(0), (%rsp) // Store NULL for method*
movl %r10d, %ecx // Place size of args in rcx.
- movq %rdi, %rax // RAX := method to be called
- movq %rsi, %r11 // R11 := arg_array
- leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ movq %rdi, %rax // rax := method to be called
+ movq %rsi, %r11 // r11 := arg_array
+ leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
// stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
- leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
- movq %rax, %rdi // RDI := method to be called
- movl (%r11), %esi // RSI := this pointer
+ leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
+ movq %rax, %rdi // rdi := method to be called
+ movl (%r11), %esi // rsi := this pointer
addq LITERAL(4), %r11 // arg_array++
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished
@@ -520,8 +526,12 @@
.Lgpr_setup_finished:
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
- CFI_DEF_CFA_REGISTER(rsp)
- POP r9 // Pop r9 - shorty*.
+ POP r15 // Pop r15
+ POP r14 // Pop r14
+ POP r13 // Pop r13
+ POP r12 // Pop r12
+ POP rbx // Pop rbx
+ POP r9 // Pop r9 - shorty*
POP r8 // Pop r8 - result*.
POP rbp // Pop rbp
cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
@@ -531,10 +541,10 @@
movq %rax, (%r8) // Store the result assuming its a long, int or Object*
ret
.Lreturn_double_quick:
- movsd %xmm0, (%r8) // Store the double floating point result.
+ movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick:
- movss %xmm0, (%r8) // Store the floating point result.
+ movss %xmm0, (%r8) // Store the floating point result.
ret
#endif // __APPLE__
END_FUNCTION art_quick_invoke_stub
@@ -571,30 +581,36 @@
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
+ PUSH rbx // Save rbx
+ PUSH r12 // Save r12
+ PUSH r13 // Save r13
+ PUSH r14 // Save r14
+ PUSH r15 // Save r15
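+    // Same 40-byte frame growth as in art_quick_invoke_stub above.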
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
- addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
- // r8 and r9 in frame.
- andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
- subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
- subq %rdx, %rsp // Reserve stack space for argument array.
+ addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
+                                   // r8, r9, rbx, r12, r13, r14, and r15 in frame.
+ andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
+ subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
+ // r13, r14, and r15.
+ subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store NULL for method*
- movl %r10d, %ecx // Place size of args in rcx.
- movq %rdi, %rax // RAX := method to be called
- movq %rsi, %r11 // R11 := arg_array
- leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
- // stack arguments.
+ movl %r10d, %ecx // Place size of args in rcx.
+ movq %rdi, %rax // rax := method to be called
+ movq %rsi, %r11 // r11 := arg_array
+ leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
- rep movsb // while (rcx--) { *rdi++ = *rsi++ }
- leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
- movq %rax, %rdi // RDI := method to be called
+ rep movsb // while (rcx--) { *rdi++ = *rsi++ }
+ leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
+ movq %rax, %rdi // rdi := method to be called
LOOP_OVER_SHORTY_LOADING_GPRS rsi, esi, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished2
@@ -602,22 +618,26 @@
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
- movq %rbp, %rsp // Restore stack pointer.
- CFI_DEF_CFA_REGISTER(rsp)
- POP r9 // Pop r9 - shorty*.
- POP r8 // Pop r8 - result*.
- POP rbp // Pop rbp
- cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
+ movq %rbp, %rsp // Restore stack pointer.
+ POP r15 // Pop r15
+ POP r14 // Pop r14
+ POP r13 // Pop r13
+ POP r12 // Pop r12
+ POP rbx // Pop rbx
+ POP r9 // Pop r9 - shorty*.
+ POP r8 // Pop r8 - result*.
+ POP rbp // Pop rbp
+ cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
je .Lreturn_double_quick2
- cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
+ cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
je .Lreturn_float_quick2
- movq %rax, (%r8) // Store the result assuming its a long, int or Object*
+    movq %rax, (%r8)               // Store the result assuming it's a long, int or Object*
ret
.Lreturn_double_quick2:
- movsd %xmm0, (%r8) // Store the double floating point result.
+ movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick2:
- movss %xmm0, (%r8) // Store the floating point result.
+ movss %xmm0, (%r8) // Store the floating point result.
ret
#endif // __APPLE__
END_FUNCTION art_quick_invoke_static_stub
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 43a2c59..7905bb4 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -173,7 +173,8 @@
// stored in between objects.
// Remaining size is for the new alloc space.
const size_t growth_limit = growth_limit_ - size;
- const size_t capacity = Capacity() - size;
+  // Use the mem map limit (non growth limit capacity) in case the growth
+  // limit has been cleared.
+ const size_t capacity = NonGrowthLimitCapacity() - size;
VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
<< "End " << reinterpret_cast<const void*>(End()) << "\n"
<< "Size " << size << "\n"
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 9307598..c223e2e 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -20,12 +20,13 @@
#include <zlib.h>
#include "arch/instruction_set_features.h"
+#include "base/stringprintf.h"
#include "utils.h"
namespace art {
-const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '5', '2', '\0' };
+constexpr uint8_t OatHeader::kOatMagic[4];
+constexpr uint8_t OatHeader::kOatVersion[4];
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -67,6 +68,13 @@
uint32_t image_file_location_oat_checksum,
uint32_t image_file_location_oat_data_begin,
const SafeMap<std::string, std::string>* variable_data) {
+ // Don't want asserts in header as they would be checked in each file that includes it. But the
+ // fields are private, so we check inside a method.
+ static_assert(sizeof(magic_) == sizeof(kOatMagic),
+ "Oat magic and magic_ have different lengths.");
+ static_assert(sizeof(version_) == sizeof(kOatVersion),
+ "Oat version and version_ have different lengths.");
+
memcpy(magic_, kOatMagic, sizeof(kOatMagic));
memcpy(version_, kOatVersion, sizeof(kOatVersion));
executable_offset_ = 0;
@@ -127,6 +135,28 @@
return true;
}
+std::string OatHeader::GetValidationErrorMessage() const {
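+  // Returns a message describing the first failed header check, or an empty
+  // string if the header is valid.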
+ if (memcmp(magic_, kOatMagic, sizeof(kOatMagic)) != 0) {
+ static_assert(sizeof(kOatMagic) == 4, "kOatMagic has unexpected length");
+ return StringPrintf("Invalid oat magic, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+ kOatMagic[0], kOatMagic[1], kOatMagic[2], kOatMagic[3],
+ magic_[0], magic_[1], magic_[2], magic_[3]);
+ }
+ if (memcmp(version_, kOatVersion, sizeof(kOatVersion)) != 0) {
+ static_assert(sizeof(kOatVersion) == 4, "kOatVersion has unexpected length");
+ return StringPrintf("Invalid oat version, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+ kOatVersion[0], kOatVersion[1], kOatVersion[2], kOatVersion[3],
+ version_[0], version_[1], version_[2], version_[3]);
+ }
+ if (!IsAligned<kPageSize>(executable_offset_)) {
+ return "Executable offset not page-aligned.";
+ }
+ if (!IsAligned<kPageSize>(image_patch_delta_)) {
+ return "Image patch delta not page-aligned.";
+ }
+ return "";
+}
+
const char* OatHeader::GetMagic() const {
CHECK(IsValid());
return reinterpret_cast<const char*>(magic_);
diff --git a/runtime/oat.h b/runtime/oat.h
index 6098fbd..f218482 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
class PACKED(4) OatHeader {
public:
- static const uint8_t kOatMagic[4];
- static const uint8_t kOatVersion[4];
+ static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
+  static constexpr uint8_t kOatVersion[] = { '0', '5', '2', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
@@ -47,6 +47,7 @@
const SafeMap<std::string, std::string>* variable_data);
bool IsValid() const;
+ std::string GetValidationErrorMessage() const;
const char* GetMagic() const;
uint32_t GetChecksum() const;
void UpdateChecksum(const void* data, size_t length);
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index aa85ff0..1c6cc8b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -227,7 +227,9 @@
bool OatFile::Setup(std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
- *error_msg = StringPrintf("Invalid oat magic for '%s'", GetLocation().c_str());
+ std::string cause = GetOatHeader().GetValidationErrorMessage();
+ *error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
+ cause.c_str());
return false;
}
const uint8_t* oat = Begin();
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3e6c86b..1b992d5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -691,11 +691,16 @@
return false;
}
}
- // If not set, background collector type defaults to homogeneous compaction
- // if not low memory mode, semispace otherwise.
+  // If not set, background collector type defaults to homogeneous space
+  // compaction, or semispace if in low memory mode. If the foreground
+  // collector is GSS, use GSS as the background collector as well.
if (background_collector_type_ == gc::kCollectorTypeNone) {
- background_collector_type_ = low_memory_mode_ ?
- gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ if (collector_type_ != gc::kCollectorTypeGSS) {
+ background_collector_type_ = low_memory_mode_ ?
+ gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ } else {
+ background_collector_type_ = collector_type_;
+ }
}
// If a reference to the dalvik core.jar snuck in, replace it with
@@ -722,9 +727,6 @@
if (heap_growth_limit_ == 0) {
heap_growth_limit_ = heap_maximum_size_;
}
- if (background_collector_type_ == gc::kCollectorTypeNone) {
- background_collector_type_ = collector_type_;
- }
return true;
} // NOLINT(readability/fn_size)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index abd2553..07e2ec0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -973,10 +973,9 @@
// Most JNI libraries can just use System.loadLibrary, but libcore can't because it's
// the library that implements System.loadLibrary!
{
- std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
- if (!java_vm_->LoadNativeLibrary(env, mapped_name, nullptr, &reason)) {
- LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
+ if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &reason)) {
+ LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << reason;
}
}
diff --git a/test/129-ThreadGetId/expected.txt b/test/129-ThreadGetId/expected.txt
new file mode 100644
index 0000000..134d8d0
--- /dev/null
+++ b/test/129-ThreadGetId/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/129-ThreadGetId/info.txt b/test/129-ThreadGetId/info.txt
new file mode 100644
index 0000000..443062d
--- /dev/null
+++ b/test/129-ThreadGetId/info.txt
@@ -0,0 +1 @@
+Regression test for b/18661622
diff --git a/test/129-ThreadGetId/src/Main.java b/test/129-ThreadGetId/src/Main.java
new file mode 100644
index 0000000..9934bba
--- /dev/null
+++ b/test/129-ThreadGetId/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+
+public class Main implements Runnable {
+ static final int numberOfThreads = 5;
+ static final int totalOperations = 1000;
+
+ public static void main(String[] args) throws Exception {
+ final Thread[] threads = new Thread[numberOfThreads];
+ for (int t = 0; t < threads.length; t++) {
+ threads[t] = new Thread(new Main());
+ threads[t].start();
+ }
+ for (Thread t : threads) {
+ t.join();
+ }
+ System.out.println("Finishing");
+ }
+
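+    // Check that thread IDs, including those observed through
+    // Thread.getAllStackTraces(), are always positive (regression test for
+    // b/18661622).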
+ public void test_getId() {
+ if (Thread.currentThread().getId() <= 0) {
+ System.out.println("current thread's ID is not positive");
+ }
+ // Check all the current threads for positive IDs.
+ Map<Thread, StackTraceElement[]> stMap = Thread.getAllStackTraces();
+ for (Thread thread : stMap.keySet()) {
+ if (thread.getId() <= 0) {
+ System.out.println("thread's ID is not positive: " + thread.getName());
+ }
+ }
+ }
+
+ public void run() {
+ for (int i = 0; i < totalOperations; ++i) {
+ test_getId();
+ }
+ }
+}