-rw-r--r--  build/Android.common_build.mk | 16
-rw-r--r--  build/Android.gtest.mk | 4
-rw-r--r--  cmdline/unit.h | 1
-rw-r--r--  compiler/compiled_method.cc | 9
-rw-r--r--  compiler/compiled_method.h | 10
-rw-r--r--  compiler/dex/gvn_dead_code_elimination.cc | 1
-rw-r--r--  compiler/dex/local_value_numbering.cc | 6
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 5
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 2
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 4
-rw-r--r--  compiler/driver/compiler_driver.cc | 14
-rw-r--r--  compiler/driver/compiler_driver.h | 1
-rw-r--r--  compiler/elf_builder.h | 2
-rw-r--r--  compiler/elf_writer_quick.cc | 5
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.cc | 7
-rw-r--r--  compiler/linker/arm/relative_patcher_thumb2_test.cc | 24
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64_test.cc | 32
-rw-r--r--  compiler/linker/relative_patcher.cc | 5
-rw-r--r--  compiler/linker/relative_patcher_test.h | 2
-rw-r--r--  compiler/linker/x86/relative_patcher_x86_test.cc | 8
-rw-r--r--  compiler/linker/x86_64/relative_patcher_x86_64_test.cc | 10
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 1
-rw-r--r--  compiler/optimizing/code_generator.cc | 2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics.cc | 6
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 4
-rw-r--r--  compiler/utils/arm/assembler_arm.cc | 6
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 3
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 2
-rw-r--r--  compiler/utils/array_ref.h | 2
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc | 2
-rw-r--r--  disassembler/Android.mk | 2
-rw-r--r--  disassembler/disassembler_mips.cc | 1
-rw-r--r--  disassembler/disassembler_mips64.cc | 1
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.cc | 2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 6
-rw-r--r--  runtime/arch/instruction_set_features.cc | 6
-rw-r--r--  runtime/arch/instruction_set_features.h | 2
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.cc | 3
-rw-r--r--  runtime/arch/mips64/jni_entrypoints_mips64.S | 16
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 8
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86.cc | 37
-rw-r--r--  runtime/arch/x86/instruction_set_features_x86_test.cc | 36
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 6
-rw-r--r--  runtime/asm_support.h | 9
-rw-r--r--  runtime/base/allocator.h | 4
-rw-r--r--  runtime/base/arena_containers.h | 1
-rw-r--r--  runtime/base/macros.h | 2
-rw-r--r--  runtime/class_linker.cc | 31
-rw-r--r--  runtime/class_linker.h | 4
-rw-r--r--  runtime/class_linker_test.cc | 12
-rw-r--r--  runtime/debugger.cc | 14
-rw-r--r--  runtime/debugger.h | 6
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 23
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 104
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 10
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 3
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 59
-rw-r--r--  runtime/gc/collector/mark_compact.h | 10
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 88
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 26
-rw-r--r--  runtime/gc/collector/semi_space.cc | 23
-rw-r--r--  runtime/gc/collector/semi_space.h | 10
-rw-r--r--  runtime/gc/heap.cc | 50
-rw-r--r--  runtime/gc/space/malloc_space.cc | 1
-rw-r--r--  runtime/gc/task_processor.cc | 1
-rw-r--r--  runtime/gc_root-inl.h | 14
-rw-r--r--  runtime/gc_root.h | 140
-rw-r--r--  runtime/handle.h | 18
-rw-r--r--  runtime/handle_scope.h | 2
-rw-r--r--  runtime/hprof/hprof.cc | 143
-rw-r--r--  runtime/indirect_reference_table.cc | 12
-rw-r--r--  runtime/indirect_reference_table.h | 8
-rw-r--r--  runtime/instrumentation.cc | 5
-rw-r--r--  runtime/instrumentation.h | 2
-rw-r--r--  runtime/intern_table.cc | 13
-rw-r--r--  runtime/intern_table.h | 4
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 4
-rw-r--r--  runtime/java_vm_ext.cc | 27
-rw-r--r--  runtime/java_vm_ext.h | 2
-rw-r--r--  runtime/mirror/array-inl.h | 4
-rw-r--r--  runtime/mirror/array.h | 3
-rw-r--r--  runtime/mirror/art_field.cc | 4
-rw-r--r--  runtime/mirror/art_field.h | 2
-rw-r--r--  runtime/mirror/art_method.cc | 4
-rw-r--r--  runtime/mirror/art_method.h | 2
-rw-r--r--  runtime/mirror/class.cc | 4
-rw-r--r--  runtime/mirror/class.h | 2
-rw-r--r--  runtime/mirror/field.cc | 6
-rw-r--r--  runtime/mirror/field.h | 2
-rw-r--r--  runtime/mirror/object.h | 3
-rw-r--r--  runtime/mirror/object_reference.h | 22
-rw-r--r--  runtime/mirror/reference.cc | 7
-rw-r--r--  runtime/mirror/reference.h | 2
-rw-r--r--  runtime/mirror/stack_trace_element.cc | 4
-rw-r--r--  runtime/mirror/stack_trace_element.h | 2
-rw-r--r--  runtime/mirror/string.cc | 4
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/mirror/throwable.cc | 4
-rw-r--r--  runtime/mirror/throwable.h | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 25
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  runtime/oat_file.h | 3
-rw-r--r--  runtime/parsed_options.cc | 1
-rw-r--r--  runtime/quick_exception_handler.h | 2
-rw-r--r--  runtime/read_barrier-inl.h | 42
-rw-r--r--  runtime/read_barrier.h | 8
-rw-r--r--  runtime/reference_table.cc | 5
-rw-r--r--  runtime/reference_table.h | 3
-rw-r--r--  runtime/runtime.cc | 96
-rw-r--r--  runtime/runtime.h | 16
-rw-r--r--  runtime/safe_map.h | 1
-rw-r--r--  runtime/stack.h | 14
-rw-r--r--  runtime/thread.cc | 128
-rw-r--r--  runtime/thread.h | 4
-rw-r--r--  runtime/thread_list.cc | 4
-rw-r--r--  runtime/thread_list.h | 2
-rw-r--r--  runtime/trace.cc | 3
-rw-r--r--  runtime/transaction.cc | 36
-rw-r--r--  runtime/transaction.h | 12
-rw-r--r--  runtime/verifier/method_verifier.cc | 9
-rw-r--r--  runtime/verifier/method_verifier.h | 4
-rw-r--r--  runtime/verifier/reg_type.cc | 4
-rw-r--r--  runtime/verifier/reg_type.h | 2
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 32
-rw-r--r--  runtime/verifier/reg_type_cache.h | 4
-rw-r--r--  sigchainlib/sigchain_dummy.cc | 7
130 files changed, 1091 insertions, 687 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 8f00298..c60e75b 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -135,6 +135,22 @@ art_clang_cflags += -Wfloat-equal
# Enable warning of converting ints to void*.
art_clang_cflags += -Wint-to-void-pointer-cast
+# Enable warning of wrong unused annotations.
+art_clang_cflags += -Wused-but-marked-unused
+
+# Enable warning for deprecated language features.
+art_clang_cflags += -Wdeprecated
+
+# Enable warning for unreachable break & return.
+art_clang_cflags += -Wunreachable-code-break -Wunreachable-code-return
+
+# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's
+# a pain.
+ifneq ($(HOST_OS),darwin)
+ art_clang_cflags += -Wmissing-noreturn
+endif
+
+
# GCC-only warnings.
art_gcc_cflags := -Wunused-but-set-parameter
# Suggest const: too many false positives, but good for a trial run.
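
For reference, the patterns flagged by the newly enabled warnings are exactly what the rest of this change cleans up (stray ATTRIBUTE_UNUSED annotations, dead break/return statements after a return or LOG(FATAL)). A minimal sketch, not ART code; ATTRIBUTE_UNUSED here is a simplified stand-in for the macro in runtime/base/macros.h:

    // -Wused-but-marked-unused: a parameter annotated as unused that is then read.
    #define ATTRIBUTE_UNUSED __attribute__((unused))
    int Scale(int factor ATTRIBUTE_UNUSED) {
      return factor * 2;  // clang warns: parameter was marked unused but was used
    }

    // -Wunreachable-code-break: a break that can never execute after a return.
    int Pick(int selector) {
      switch (selector) {
        case 0:
          return 1;
          break;  // clang warns: 'break' will never be executed
        default:
          return 0;
      }
    }
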
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1e7988a..0e2dad9 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -257,6 +257,7 @@ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
$(eval $(call set-target-local-clang-vars))
$(eval $(call set-target-local-cflags-vars,debug))
+LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
@@ -271,6 +272,7 @@ LOCAL_STATIC_LIBRARIES := libgtest_host
LOCAL_LDLIBS += -ldl -lpthread
LOCAL_MULTILIB := both
LOCAL_CLANG := $(ART_HOST_CLANG)
+LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
include $(BUILD_HOST_SHARED_LIBRARY)
@@ -423,6 +425,7 @@ define define-art-gtest
LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
+ LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $$(BUILD_EXECUTABLE)
library_path :=
2nd_library_path :=
@@ -461,6 +464,7 @@ test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_
LOCAL_MULTILIB := both
LOCAL_MODULE_STEM_32 := $$(art_gtest_name)32
LOCAL_MODULE_STEM_64 := $$(art_gtest_name)64
+ LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
include $$(BUILD_HOST_EXECUTABLE)
ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
diff --git a/cmdline/unit.h b/cmdline/unit.h
index 6b53b18..ad6a03d 100644
--- a/cmdline/unit.h
+++ b/cmdline/unit.h
@@ -24,6 +24,7 @@ struct Unit {
// Avoid 'Conditional jump or move depends on uninitialised value(s)' errors
// when running valgrind by specifying a user-defined constructor.
Unit() {}
+ Unit(const Unit&) = default;
~Unit() {}
bool operator==(Unit) const {
return true;
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 03370db..eeed877 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -132,7 +132,7 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<LinkerPatch>& patches)
+ const ArrayRef<const LinkerPatch>& patches)
: CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
owns_arrays_(!driver->DedupeEnabled()),
frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
@@ -179,7 +179,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<LinkerPatch>& patches) {
+ const ArrayRef<const LinkerPatch>& patches) {
SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
CompiledMethod* ret = alloc.allocate(1);
alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
@@ -200,7 +200,8 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethodStackMap(
CompiledMethod* ret = alloc.allocate(1);
alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
fp_spill_mask, nullptr, ArrayRef<const uint8_t>(), stack_map,
- ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), ArrayRef<LinkerPatch>());
+ ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
+ ArrayRef<const LinkerPatch>());
return ret;
}
@@ -217,7 +218,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethodCFI(
alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
fp_spill_mask, nullptr, ArrayRef<const uint8_t>(),
ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
- cfi_info, ArrayRef<LinkerPatch>());
+ cfi_info, ArrayRef<const LinkerPatch>());
return ret;
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 7497b17..506b47b 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -320,7 +320,7 @@ class CompiledMethod FINAL : public CompiledCode {
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<LinkerPatch>& patches = ArrayRef<LinkerPatch>());
+ const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>());
virtual ~CompiledMethod();
@@ -336,7 +336,7 @@ class CompiledMethod FINAL : public CompiledCode {
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<LinkerPatch>& patches = ArrayRef<LinkerPatch>());
+ const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>());
static CompiledMethod* SwapAllocCompiledMethodStackMap(
CompilerDriver* driver,
@@ -391,8 +391,8 @@ class CompiledMethod FINAL : public CompiledCode {
return cfi_info_;
}
- const SwapVector<LinkerPatch>& GetPatches() const {
- return patches_;
+ ArrayRef<const LinkerPatch> GetPatches() const {
+ return ArrayRef<const LinkerPatch>(patches_);
}
private:
@@ -417,7 +417,7 @@ class CompiledMethod FINAL : public CompiledCode {
// For quick code, a FDE entry for the debug_frame section.
SwapVector<uint8_t>* cfi_info_;
// For quick code, linker patches needed by the method.
- SwapVector<LinkerPatch> patches_;
+ const SwapVector<LinkerPatch> patches_;
};
} // namespace art
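
A note on the GetPatches() change above: returning ArrayRef<const LinkerPatch> hands callers a lightweight read-only view instead of a reference to the owning SwapVector. A minimal, self-contained sketch of the idea (Span and PatchStub are hypothetical stand-ins, not the real art::ArrayRef or art::LinkerPatch):

    #include <cstddef>
    #include <vector>

    template <typename T>
    class Span {  // hypothetical stand-in for ArrayRef<const T>
     public:
      Span() : data_(nullptr), size_(0u) {}
      template <typename C>
      explicit Span(const C& c) : data_(c.data()), size_(c.size()) {}
      const T* data() const { return data_; }
      std::size_t size() const { return size_; }
     private:
      const T* data_;
      std::size_t size_;
    };

    struct PatchStub { unsigned literal_offset; };  // placeholder for LinkerPatch

    class MethodStub {
     public:
      // Callers get a const view; the owning vector stays private and immutable.
      Span<PatchStub> GetPatches() const { return Span<PatchStub>(patches_); }
     private:
      const std::vector<PatchStub> patches_{};
    };
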
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 2d4c18f..ec12221 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -1357,7 +1357,6 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
UNREACHABLE();
- break;
}
if (mir->ssa_rep->num_defs != 0) {
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index dc222b5..cdf5e38 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -166,9 +166,9 @@ class LocalValueNumbering::AliasingArrayVersions {
return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
}
- static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ static uint16_t LookupMergeValue(GlobalValueNumbering* gvn,
const LocalValueNumbering* lvn,
- uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
+ uint16_t type, uint16_t location) {
// If the location is non-aliasing in lvn, use the non-aliasing value.
uint16_t array = gvn->GetArrayLocationBase(location);
if (lvn->IsNonAliasingArray(array, type)) {
@@ -182,8 +182,6 @@ class LocalValueNumbering::AliasingArrayVersions {
static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
const LocalValueNumbering* lvn,
uint16_t type ATTRIBUTE_UNUSED) {
- UNUSED(gvn);
- UNUSED(type);
return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f48290d..e9ad8ba 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -589,13 +589,11 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
DCHECK_EQ(shift, 0);
// Binary, but rm is encoded twice.
return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
- break;
case kOpRevsh:
// Binary, but rm is encoded twice.
NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
// "sxth r1, r2" is "sbfm r1, r2, #0, #15"
return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), 0, 15);
- break;
case kOp2Byte:
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
// "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
@@ -645,10 +643,9 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
// Note: intentional fallthrough
case kOpSub:
return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
- break;
default:
LOG(FATAL) << "Bad Opcode: " << opcode;
- break;
+ UNREACHABLE();
}
DCHECK(!IsPseudoLirOp(opcode));
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 483a5d0..f944c11 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1171,7 +1171,7 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
ArrayRef<const uint8_t>(vmap_encoder.GetData()),
ArrayRef<const uint8_t>(native_gc_map_),
cfi_ref,
- ArrayRef<LinkerPatch>(patches_));
+ ArrayRef<const LinkerPatch>(patches_));
}
size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index bf0e0fc..8ab5422 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -283,9 +283,9 @@ LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
break;
case kOpBx:
return NewLIR2(kMipsJalr, rZERO, r_dest_src.GetReg());
- break;
default:
LOG(FATAL) << "Bad case in OpReg";
+ UNREACHABLE();
}
return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
}
@@ -295,8 +295,8 @@ LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
} else {
LOG(FATAL) << "Bad case in OpRegImm";
+ UNREACHABLE();
}
- UNREACHABLE();
}
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f52f50e..f6b217a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -350,6 +350,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
compiler_(Compiler::Create(this, compiler_kind)),
+ compiler_kind_(compiler_kind),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
freezing_constructor_lock_("freezing constructor lock"),
@@ -2214,10 +2215,8 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
InstructionSetHasGenericJniStub(instruction_set_)) {
// Leaving this empty will trigger the generic JNI version
} else {
- if (instruction_set_ != kMips64) { // Use generic JNI for Mips64 (temporarily).
- compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
- CHECK(compiled_method != nullptr);
- }
+ compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
+ CHECK(compiled_method != nullptr);
}
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
@@ -2272,8 +2271,11 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
DCHECK(GetCompiledMethod(method_ref) != nullptr) << PrettyMethod(method_idx, dex_file);
}
- // Done compiling, delete the verified method to reduce native memory usage.
- verification_results_->RemoveVerifiedMethod(method_ref);
+ // Done compiling, delete the verified method to reduce native memory usage. Do not delete in
+ // optimizing compiler, which may need the verified method again for inlining.
+ if (compiler_kind_ != Compiler::kOptimizing) {
+ verification_results_->RemoveVerifiedMethod(method_ref);
+ }
if (self->IsExceptionPending()) {
ScopedObjectAccess soa(self);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index efcaae4..edd1bd2 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -557,6 +557,7 @@ class CompilerDriver {
DexFileToMethodInlinerMap* const method_inliner_map_;
std::unique_ptr<Compiler> compiler_;
+ Compiler::Kind compiler_kind_;
const InstructionSet instruction_set_;
const InstructionSetFeatures* const instruction_set_features_;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 9ab3602..124ed03 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -40,6 +40,7 @@ class ElfSectionBuilder : public ValueObject {
section_.sh_addralign = align;
section_.sh_entsize = entsize;
}
+ ElfSectionBuilder(const ElfSectionBuilder&) = default;
~ElfSectionBuilder() {}
@@ -144,6 +145,7 @@ class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword,
: ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, link, info, align,
entsize) {
}
+ ElfRawSectionBuilder(const ElfRawSectionBuilder&) = default;
~ElfRawSectionBuilder() {}
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 3ce19ab..24cb364 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -490,14 +490,11 @@ static void FillInCFIInformation(OatWriter* oat_writer,
int code_factor_bits_ = 0;
int isa = -1;
switch (oat_writer->GetOatHeader().GetInstructionSet()) {
+ case kArm: // arm actually means thumb2.
case kThumb2:
code_factor_bits_ = 1; // 16-bit instuctions
isa = 1; // DW_ISA_ARM_thumb.
break;
- case kArm:
- code_factor_bits_ = 1; // 16-bit instructions
- isa = 2; // DW_ISA_ARM_arm.
- break;
case kArm64:
case kMips:
case kMips64:
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 17325d6..d446867 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -126,25 +126,20 @@ const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySp
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
: JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
- callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S0));
- callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S1));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
-
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
- callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(SP));
callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
}
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
- result = 1 << S0 | 1 << S1 | 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 |
- 1 << S7 | 1 << GP | 1 << SP | 1 << S8;
+ result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
return result;
}
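
The CoreSpillMask() above has to agree bit-for-bit with the registers pushed in the constructor, since it is just a bit set indexed by register number (RA is included because the frame always saves the return address). A small sketch using the standard MIPS register numbering:

    #include <cstdint>

    // Standard MIPS numbering: S2..S7 are 18..23, GP is 28, S8 is 30, RA is 31.
    enum Reg : uint32_t { S2 = 18, S3, S4, S5, S6, S7, GP = 28, S8 = 30, RA = 31 };

    constexpr uint32_t CoreSpillMask() {
      return 1u << S2 | 1u << S3 | 1u << S4 | 1u << S5 | 1u << S6 | 1u << S7 |
             1u << GP | 1u << S8 | 1u << RA;
    }
    // Bit i set means register i is saved in the frame, e.g. bit 31 for RA.
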
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index abdfd6d..3b397cc 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -39,14 +39,14 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
static constexpr uint32_t kBlMinusMax = 0xf400d000;
bool Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
- const ArrayRef<LinkerPatch>& method1_patches,
+ const ArrayRef<const LinkerPatch>& method1_patches,
const ArrayRef<const uint8_t>& method3_code,
- const ArrayRef<LinkerPatch>& method3_patches,
+ const ArrayRef<const LinkerPatch>& method3_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
const uint32_t method1_offset =
CompiledCode::AlignCode(kTrampolineSize, kThumb2) + sizeof(OatQuickMethodHeader);
- AddCompiledMethod(MethodRef(1u), method1_code, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
// We want to put the method3 at a very precise offset.
const uint32_t method3_offset = method1_offset + distance_without_thunks;
@@ -59,7 +59,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset);
std::vector<uint8_t> method2_raw_code(method2_size);
ArrayRef<const uint8_t> method2_code(method2_raw_code);
- AddCompiledMethod(MethodRef(2u), method2_code, ArrayRef<LinkerPatch>());
+ AddCompiledMethod(MethodRef(2u), method2_code, ArrayRef<const LinkerPatch>());
AddCompiledMethod(MethodRef(3u), method3_code, method3_patches);
@@ -139,7 +139,7 @@ TEST_F(Thumb2RelativePatcherTest, CallSelf) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
static const uint8_t expected_code[] = {
@@ -152,11 +152,11 @@ TEST_F(Thumb2RelativePatcherTest, CallOther) {
LinkerPatch method1_patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
LinkerPatch method2_patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<LinkerPatch>(method2_patches));
+ AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -179,7 +179,7 @@ TEST_F(Thumb2RelativePatcherTest, CallTrampoline) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -201,7 +201,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarAfter) {
constexpr uint32_t max_positive_disp = 16 * MB - 2u + 4u /* PC adjustment */;
bool thunk_in_gap = Create2MethodsWithGap(method1_code, method1_patches,
- kNopCode, ArrayRef<LinkerPatch>(),
+ kNopCode, ArrayRef<const LinkerPatch>(),
bl_offset_in_method1 + max_positive_disp);
ASSERT_FALSE(thunk_in_gap); // There should be no thunk.
@@ -220,7 +220,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarBefore) {
};
constexpr uint32_t just_over_max_negative_disp = 16 * MB - 4u /* PC adjustment */;
- bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<LinkerPatch>(),
+ bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(),
method3_code, method3_patches,
just_over_max_negative_disp - bl_offset_in_method3);
ASSERT_FALSE(thunk_in_gap); // There should be no thunk.
@@ -241,7 +241,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) {
constexpr uint32_t just_over_max_positive_disp = 16 * MB + 4u /* PC adjustment */;
bool thunk_in_gap = Create2MethodsWithGap(method1_code, method1_patches,
- kNopCode, ArrayRef<LinkerPatch>(),
+ kNopCode, ArrayRef<const LinkerPatch>(),
bl_offset_in_method1 + just_over_max_positive_disp);
ASSERT_TRUE(thunk_in_gap);
@@ -269,7 +269,7 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) {
};
constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */;
- bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<LinkerPatch>(),
+ bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(),
method3_code, method3_patches,
just_over_max_negative_disp - bl_offset_in_method3);
ASSERT_FALSE(thunk_in_gap); // There should be a thunk but it should be after the method2.
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index b039936..b36e6d0 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -43,14 +43,14 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
static constexpr uint32_t kLdurInsn = 0xf840405fu;
uint32_t Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
- const ArrayRef<LinkerPatch>& method1_patches,
+ const ArrayRef<const LinkerPatch>& method1_patches,
const ArrayRef<const uint8_t>& last_method_code,
- const ArrayRef<LinkerPatch>& last_method_patches,
+ const ArrayRef<const LinkerPatch>& last_method_patches,
uint32_t distance_without_thunks) {
CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
const uint32_t method1_offset =
CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
- AddCompiledMethod(MethodRef(1u), method1_code, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
const uint32_t gap_start =
CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);
@@ -70,13 +70,13 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
gap_code.resize(chunk_code_size, 0u);
AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
- ArrayRef<LinkerPatch>());
+ ArrayRef<const LinkerPatch>());
method_idx += 1u;
}
uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
gap_code.resize(chunk_code_size, 0u);
AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
- ArrayRef<LinkerPatch>());
+ ArrayRef<const LinkerPatch>());
method_idx += 1u;
// Add the last method and link
@@ -174,7 +174,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
LinkerPatch::DexCacheArrayPatch(num_nops * 4u , nullptr, num_nops * 4u, element_offset),
LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, element_offset),
};
- AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code), ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code),
+ ArrayRef<const LinkerPatch>(patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -202,7 +203,8 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
LinkerPatch::DexCacheArrayPatch(num_nops * 4u , nullptr, num_nops * 4u, element_offset),
LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, element_offset),
};
- AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code), ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code),
+ ArrayRef<const LinkerPatch>(patches));
Link();
}
@@ -300,7 +302,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
static const uint8_t expected_code[] = {
@@ -313,11 +315,11 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOther) {
LinkerPatch method1_patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
LinkerPatch method2_patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<LinkerPatch>(method2_patches));
+ AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -340,7 +342,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallTrampoline) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -363,7 +365,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
constexpr uint32_t max_positive_disp = 128 * MB - 4u;
uint32_t last_method_idx = Create2MethodsWithGap(method1_code, method1_patches,
- kNopCode, ArrayRef<LinkerPatch>(),
+ kNopCode, ArrayRef<const LinkerPatch>(),
bl_offset_in_method1 + max_positive_disp);
ASSERT_EQ(expected_last_method_idx, last_method_idx);
@@ -386,7 +388,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) {
};
constexpr uint32_t max_negative_disp = 128 * MB;
- uint32_t last_method_idx = Create2MethodsWithGap(kNopCode, ArrayRef<LinkerPatch>(),
+ uint32_t last_method_idx = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(),
last_method_code, last_method_patches,
max_negative_disp - bl_offset_in_last_method);
uint32_t method1_offset = GetMethodOffset(1u);
@@ -411,7 +413,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
constexpr uint32_t just_over_max_positive_disp = 128 * MB;
uint32_t last_method_idx = Create2MethodsWithGap(
- method1_code, method1_patches, kNopCode, ArrayRef<LinkerPatch>(),
+ method1_code, method1_patches, kNopCode, ArrayRef<const LinkerPatch>(),
bl_offset_in_method1 + just_over_max_positive_disp);
ASSERT_EQ(expected_last_method_idx, last_method_idx);
@@ -440,7 +442,7 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
uint32_t last_method_idx = Create2MethodsWithGap(
- kNopCode, ArrayRef<LinkerPatch>(), last_method_code, last_method_patches,
+ kNopCode, ArrayRef<const LinkerPatch>(), last_method_code, last_method_patches,
just_over_max_negative_disp - bl_offset_in_last_method);
uint32_t method1_offset = GetMethodOffset(1u);
uint32_t last_method_offset = GetMethodOffset(last_method_idx);
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 8ee87aa..89aed95 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -67,22 +67,17 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
switch (instruction_set) {
case kX86:
return std::unique_ptr<RelativePatcher>(new X86RelativePatcher());
- break;
case kX86_64:
return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher());
- break;
case kArm:
// Fall through: we generate Thumb2 code for "arm".
case kThumb2:
return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider));
- break;
case kArm64:
return std::unique_ptr<RelativePatcher>(
new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures()));
- break;
default:
return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
- break;
}
}
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 08167b3..70630f3 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -69,7 +69,7 @@ class RelativePatcherTest : public testing::Test {
void AddCompiledMethod(MethodReference method_ref,
const ArrayRef<const uint8_t>& code,
- const ArrayRef<LinkerPatch>& patches) {
+ const ArrayRef<const LinkerPatch>& patches) {
compiled_method_refs_.push_back(method_ref);
compiled_methods_.emplace_back(new CompiledMethod(
&driver_, instruction_set_, code,
diff --git a/compiler/linker/x86/relative_patcher_x86_test.cc b/compiler/linker/x86/relative_patcher_x86_test.cc
index c18a743..15ac47e 100644
--- a/compiler/linker/x86/relative_patcher_x86_test.cc
+++ b/compiler/linker/x86/relative_patcher_x86_test.cc
@@ -45,7 +45,7 @@ TEST_F(X86RelativePatcherTest, CallSelf) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
static const uint8_t expected_code[] = {
@@ -58,11 +58,11 @@ TEST_F(X86RelativePatcherTest, CallOther) {
LinkerPatch method1_patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
LinkerPatch method2_patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<LinkerPatch>(method2_patches));
+ AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -87,7 +87,7 @@ TEST_F(X86RelativePatcherTest, CallTrampoline) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1));
diff --git a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
index 9d9529c..36e0f01 100644
--- a/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
+++ b/compiler/linker/x86_64/relative_patcher_x86_64_test.cc
@@ -55,7 +55,7 @@ TEST_F(X86_64RelativePatcherTest, CallSelf) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
static const uint8_t expected_code[] = {
@@ -68,11 +68,11 @@ TEST_F(X86_64RelativePatcherTest, CallOther) {
LinkerPatch method1_patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(method1_patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
LinkerPatch method2_patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 1u),
};
- AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<LinkerPatch>(method2_patches));
+ AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
Link();
uint32_t method1_offset = GetMethodOffset(1u);
@@ -97,7 +97,7 @@ TEST_F(X86_64RelativePatcherTest, CallTrampoline) {
LinkerPatch patches[] = {
LinkerPatch::RelativeCodePatch(kCallCode.size() - 4u, nullptr, 2u),
};
- AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
@@ -117,7 +117,7 @@ TEST_F(X86_64RelativePatcherTest, DexCacheReference) {
LinkerPatch patches[] = {
LinkerPatch::DexCacheArrayPatch(kDexCacheLoadCode.size() - 4u, nullptr, 0u, kElementOffset),
};
- AddCompiledMethod(MethodRef(1u), kDexCacheLoadCode, ArrayRef<LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kDexCacheLoadCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index dce02f7..6511120 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -239,7 +239,6 @@ class ValueBound : public ValueObject {
*underflow = true;
return Min();
}
- return ValueBound(instruction_, new_constant);
}
private:
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9b1ef17..da28dc7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -132,7 +132,6 @@ size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
}
LOG(FATAL) << "Could not find a register in baseline register allocator";
UNREACHABLE();
- return -1;
}
size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
@@ -145,7 +144,6 @@ size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t l
}
LOG(FATAL) << "Could not find a register in baseline register allocator";
UNREACHABLE();
- return -1;
}
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5e4df1..cfc798a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -562,7 +562,6 @@ Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -575,10 +574,11 @@ Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
@@ -683,7 +683,6 @@ Location InvokeDexCallingConventionVisitor::GetReturnLocation(Primitive::Type ty
return Location();
}
UNREACHABLE();
- return Location();
}
void CodeGeneratorARM::Move32(Location destination, Location source) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f79dbc3..92b62e2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -514,7 +514,6 @@ Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -527,10 +526,11 @@ Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 9958451..cdbc778 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -555,7 +555,6 @@ Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
- break;
case Primitive::kPrimInt:
case Primitive::kPrimNot:
@@ -568,10 +567,11 @@ Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
case Primitive::kPrimShort:
case Primitive::kPrimVoid:
LOG(FATAL) << "Unexpected type " << load->GetType();
+ UNREACHABLE();
}
LOG(FATAL) << "Unreachable";
- return Location();
+ UNREACHABLE();
}
void CodeGeneratorX86_64::Move(Location destination, Location source) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 628a844..20aa45f 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -90,7 +90,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
case kIntrinsicReverseBytes:
switch (GetType(method.d.data, true)) {
case Primitive::kPrimShort:
@@ -103,7 +102,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// Abs.
case kIntrinsicAbsDouble:
@@ -166,7 +164,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// Memory.poke.
case kIntrinsicPoke:
@@ -183,7 +180,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
// String.
case kIntrinsicCharAt:
@@ -211,7 +207,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
case kIntrinsicUnsafeGet: {
const bool is_volatile = (method.d.data & kIntrinsicFlagIsVolatile);
switch (GetType(method.d.data, false)) {
@@ -225,7 +220,6 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
LOG(FATAL) << "Unknown/unsupported op size " << method.d.data;
UNREACHABLE();
}
- break;
}
case kIntrinsicUnsafePut: {
enum Sync { kNoSync, kVolatile, kOrdered };
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 33176f0..94e27e9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -776,10 +776,10 @@ static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGenerat
__ mov(out, ShifterOperand(0), CC);
}
-void IntrinsicLocationsBuilderARM::VisitUnsafeCASInt(HInvoke* invoke ATTRIBUTE_UNUSED) {
+void IntrinsicLocationsBuilderARM::VisitUnsafeCASInt(HInvoke* invoke) {
CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
-void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke ATTRIBUTE_UNUSED) {
+void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a02191b..8059289 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -89,7 +89,6 @@ uint32_t ShifterOperand::encodingArm() const {
} else {
return immed_;
}
- break;
case kRegister:
if (is_shift_) {
uint32_t shift_type;
@@ -121,7 +120,6 @@ uint32_t ShifterOperand::encodingArm() const {
// Simple register
return static_cast<uint32_t>(rm_);
}
- break;
default:
// Can't get here.
LOG(FATAL) << "Invalid shifter operand for ARM";
@@ -156,13 +154,11 @@ uint32_t ShifterOperand::encodingThumb() const {
// Simple register
return static_cast<uint32_t>(rm_);
}
- break;
default:
// Can't get here.
LOG(FATAL) << "Invalid shifter operand for thumb";
- return 0;
+ UNREACHABLE();
}
- return 0;
}
uint32_t Address::encodingArm() const {
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index a894319..6286b10 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -683,7 +683,7 @@ void Thumb2Assembler::Emit16(int16_t value) {
bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- bool set_cc ATTRIBUTE_UNUSED,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -749,7 +749,6 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
break;
case TEQ:
return true;
- break;
case ADD:
case SUB:
break;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 2031fe4..8973b9c 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -30,7 +30,9 @@
// TODO: make vixl clean wrt -Wshadow.
#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
#include "vixl/a64/macro-assembler-a64.h"
#include "vixl/a64/disasm-a64.h"
#pragma GCC diagnostic pop
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index b1b0ee5..ff5a77c 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -89,6 +89,8 @@ class ArrayRef {
: array_(v.data()), size_(v.size()) {
}
+ ArrayRef(const ArrayRef&) = default;
+
// Assignment operators.
ArrayRef& operator=(const ArrayRef& other) {
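
The explicitly defaulted copy constructors added in this change (Unit, ElfSectionBuilder, ElfRawSectionBuilder, ArrayRef) are what -Wdeprecated asks for: once a class has a user-declared copy-assignment operator or destructor, relying on the implicitly generated copy constructor is deprecated. A minimal sketch of the pattern, not ART code:

    class Buffer {
     public:
      Buffer() {}
      // The user-declared copy assignment below would make the implicit copy
      // constructor deprecated under -Wdeprecated, so it is defaulted explicitly.
      Buffer(const Buffer&) = default;
      Buffer& operator=(const Buffer& other) {
        size_ = other.size_;
        return *this;
      }
      ~Buffer() {}
     private:
      unsigned size_ = 0u;
    };
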
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 233ae7d..388d274 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1025,7 +1025,7 @@ void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
__ Move(A0, scratch_.AsGpuRegister());
// Set up call to Thread::Current()->pDeliverException
__ LoadFromOffset(kLoadDoubleword, T9, S1,
- QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
+ QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
__ Jr(T9);
// Call never returns
__ Break();
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index c9aa8c8..1cfd45a 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -81,6 +81,8 @@ define build-libart-disassembler
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
+ LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+ LOCAL_MULTILIB := both
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index b27b555..e2b7341 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -228,7 +228,6 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
continue; // No ", ".
}
- break;
case 'I': // Upper 16-bit immediate.
args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
break;
diff --git a/disassembler/disassembler_mips64.cc b/disassembler/disassembler_mips64.cc
index ae0fc26..f1c7d8e 100644
--- a/disassembler/disassembler_mips64.cc
+++ b/disassembler/disassembler_mips64.cc
@@ -233,7 +233,6 @@ size_t DisassemblerMips64::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
continue; // No ", ".
}
- break;
case 'I': // Upper 16-bit immediate.
args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
break;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index f8a9f9d..1f2ce02 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -25,7 +25,7 @@
namespace art {
const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
- const std::string& variant ATTRIBUTE_UNUSED, std::string* error_msg ATTRIBUTE_UNUSED) {
+ const std::string& variant, std::string* error_msg) {
const bool smp = true; // Conservative default.
// Look for variants that need a fix for a53 erratum 835769.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ff57603..b4de879 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -32,7 +32,7 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr wIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #176
.cfi_adjust_cfa_offset 176
@@ -97,7 +97,7 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr wIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
sub sp, sp, #96
.cfi_adjust_cfa_offset 96
@@ -266,7 +266,7 @@
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr wIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index db4b0b1..898f83a 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -288,10 +288,10 @@ const X86_64InstructionSetFeatures* InstructionSetFeatures::AsX86_64InstructionS
return down_cast<const X86_64InstructionSetFeatures*>(this);
}
-bool InstructionSetFeatures::FindVariantInArray(const char* variants[], size_t num_variants,
+bool InstructionSetFeatures::FindVariantInArray(const char* const variants[], size_t num_variants,
const std::string& variant) {
- const char** begin = variants;
- const char** end = begin + num_variants;
+ const char* const * begin = variants;
+ const char* const * end = begin + num_variants;
return std::find(begin, end, variant) != end;
}
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index e4513ef..d10ae21 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -103,7 +103,7 @@ class InstructionSetFeatures {
explicit InstructionSetFeatures(bool smp) : smp_(smp) {}
// Returns true if variant appears in the array variants.
- static bool FindVariantInArray(const char* variants[], size_t num_variants,
+ static bool FindVariantInArray(const char* const variants[], size_t num_variants,
const std::string& variant);
// Add architecture specific features in sub-classes.
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 26478cb..8c48a08 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -26,8 +26,7 @@ namespace art {
const Mips64InstructionSetFeatures* Mips64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
- // TODO: r6 variant.
- if (variant != "default") {
+ if (variant != "default" && variant != "mips64r6") {
std::ostringstream os;
LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
}
diff --git a/runtime/arch/mips64/jni_entrypoints_mips64.S b/runtime/arch/mips64/jni_entrypoints_mips64.S
index 90fd3ee..1085666 100644
--- a/runtime/arch/mips64/jni_entrypoints_mips64.S
+++ b/runtime/arch/mips64/jni_entrypoints_mips64.S
@@ -28,21 +28,21 @@ ENTRY art_jni_dlsym_lookup_stub
.cfi_adjust_cfa_offset 80
sd $ra, 64($sp)
.cfi_rel_offset 31, 64
- sw $a7, 56($sp)
+ sd $a7, 56($sp)
.cfi_rel_offset 11, 56
- sw $a6, 48($sp)
+ sd $a6, 48($sp)
.cfi_rel_offset 10, 48
- sw $a5, 40($sp)
+ sd $a5, 40($sp)
.cfi_rel_offset 9, 40
- sw $a4, 32($sp)
+ sd $a4, 32($sp)
.cfi_rel_offset 8, 32
- sw $a3, 24($sp)
+ sd $a3, 24($sp)
.cfi_rel_offset 7, 24
- sw $a2, 16($sp)
+ sd $a2, 16($sp)
.cfi_rel_offset 6, 16
- sw $a1, 8($sp)
+ sd $a1, 8($sp)
.cfi_rel_offset 5, 8
- sw $a0, 0($sp)
+ sd $a0, 0($sp)
.cfi_rel_offset 4, 0
jal artFindNativeMethod # (Thread*)
move $a0, $s1 # pass Thread::Current()
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 697bf00..3d502e6 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -77,7 +77,7 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- ld $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0)
+ lwu $v0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($v0)
sw $v0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -120,7 +120,7 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- ld $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
+ lwu $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
sw $v0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -237,7 +237,7 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- ld $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
+ lwu $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
sw $v0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -248,7 +248,7 @@
ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
ld $v0, 0($v0)
THIS_LOAD_REQUIRES_READ_BARRIER
- ld $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0)
+ lwu $v0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($v0)
sw $v0, 0($sp) # Place Method* at bottom of stack.
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index a12773d..8227633 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -25,20 +25,43 @@
namespace art {
+// Feature-support arrays.
+
+static constexpr const char* x86_known_variants[] = {
+ "atom",
+ "silvermont",
+};
+
+static constexpr const char* x86_variants_with_ssse3[] = {
+ "atom",
+ "silvermont",
+};
+
+static constexpr const char* x86_variants_with_sse4_1[] = {
+ "silvermont",
+};
+
+static constexpr const char* x86_variants_with_sse4_2[] = {
+ "silvermont",
+};
+
const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
- const std::string& variant ATTRIBUTE_UNUSED, std::string* error_msg ATTRIBUTE_UNUSED,
+ const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
bool x86_64) {
- bool known_variant = false;
bool smp = true; // Conservative default.
- static const char* x86_variants_with_ssse3[] = {
- "atom"
- };
bool has_SSSE3 = FindVariantInArray(x86_variants_with_ssse3, arraysize(x86_variants_with_ssse3),
variant);
- bool has_SSE4_1 = false;
- bool has_SSE4_2 = false;
+ bool has_SSE4_1 = FindVariantInArray(x86_variants_with_sse4_1,
+ arraysize(x86_variants_with_sse4_1),
+ variant);
+ bool has_SSE4_2 = FindVariantInArray(x86_variants_with_sse4_2,
+ arraysize(x86_variants_with_sse4_2),
+ variant);
bool has_AVX = false;
bool has_AVX2 = false;
+
+ bool known_variant = FindVariantInArray(x86_known_variants, arraysize(x86_known_variants),
+ variant);
if (!known_variant && variant != "default") {
std::ostringstream os;
LOG(WARNING) << "Unexpected CPU variant for X86 using defaults: " << variant;
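For reference, the variant tables above are searched with a simple linear lookup by name. A
minimal standalone sketch of that pattern (illustrative only; SupportsFeature and
kVariantsWithSse4_2 are hypothetical names, not ART's helpers):

#include <cstddef>
#include <string>

// Returns true if `variant` appears in the `variants` table.
static bool SupportsFeature(const char* const* variants, std::size_t count,
                            const std::string& variant) {
  for (std::size_t i = 0; i < count; ++i) {
    if (variant == variants[i]) {
      return true;
    }
  }
  return false;
}

static const char* const kVariantsWithSse4_2[] = { "silvermont" };

// "silvermont" maps to SSE4.2 support; "atom" and "default" do not.
static const bool kHasSse4_2 =
    SupportsFeature(kVariantsWithSse4_2,
                    sizeof(kVariantsWithSse4_2) / sizeof(kVariantsWithSse4_2[0]),
                    "silvermont");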
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index d231beb..25a406b 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -67,4 +67,40 @@ TEST(X86InstructionSetFeaturesTest, X86FeaturesFromAtomVariant) {
EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
}
+TEST(X86InstructionSetFeaturesTest, X86FeaturesFromSilvermontVariant) {
+ // Build features for a 32-bit x86 silvermont processor.
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> x86_features(
+ InstructionSetFeatures::FromVariant(kX86, "silvermont", &error_msg));
+ ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_features->Equals(x86_features.get()));
+ EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 15U);
+
+ // Build features for a 32-bit x86 default processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_default_features(
+ InstructionSetFeatures::FromVariant(kX86, "default", &error_msg));
+ ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
+ EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ x86_default_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_default_features->AsBitmap(), 1U);
+
+ // Build features for a 64-bit x86-64 silvermont processor.
+ std::unique_ptr<const InstructionSetFeatures> x86_64_features(
+ InstructionSetFeatures::FromVariant(kX86_64, "silvermont", &error_msg));
+ ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
+ EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
+ EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2",
+ x86_64_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_64_features->AsBitmap(), 15U);
+
+ EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
+ EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
+ EXPECT_FALSE(x86_features->Equals(x86_default_features.get()));
+}
+
} // namespace art
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 3a448a5..ce21f01 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -67,7 +67,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for save all callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
@@ -110,7 +110,7 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME)
movq %xmm15, 32(%rsp)
// R10 := ArtMethod* for refs only callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
// Store rsp as the top quick frame.
@@ -170,7 +170,7 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
// R10 := ArtMethod* for ref and args callee save frame method.
THIS_LOAD_REQUIRES_READ_BARRIER
- movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ movl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10d
// Save FPRs.
movq %xmm0, 16(%rsp)
movq %xmm1, 24(%rsp)
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 0d0017d..dba4af8 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -57,6 +57,11 @@ static inline void CheckAsmSupportOffsetsAndSizes() {
#define STACK_REFERENCE_SIZE 4
ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
+// Size of heap references
+#define COMPRESSED_REFERENCE_SIZE 4
+ADD_TEST_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE),
+ sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+
// Note: these callee save method loads require read barriers.
// Offset of field Runtime::callee_save_methods_[kSaveAll]
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
@@ -64,12 +69,12 @@ ADD_TEST_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kSaveAll))
// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET __SIZEOF_POINTER__
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET COMPRESSED_REFERENCE_SIZE
ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsOnly))
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * __SIZEOF_POINTER__)
+#define RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET (2 * COMPRESSED_REFERENCE_SIZE)
ADD_TEST_EQ(static_cast<size_t>(RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET),
art::Runtime::GetCalleeSaveMethodOffset(art::Runtime::kRefsAndArgs))
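The narrower loads earlier in this patch (ld to lwu on mips64, movq to movl on x86-64) follow
from this change: each callee-save method slot is now a 4-byte compressed reference rather than
a pointer-sized word, so the entrypoints read 32 bits and the offsets are multiples of
COMPRESSED_REFERENCE_SIZE. A minimal sketch of the resulting layout, using illustrative
constants rather than ART's macros:

#include <cstddef>

// Hypothetical mirror of the offsets defined above, assuming 4-byte compressed references.
constexpr std::size_t kCompressedReferenceSize = 4;
constexpr std::size_t kSaveAllOffset     = 0 * kCompressedReferenceSize;  // 0
constexpr std::size_t kRefsOnlyOffset    = 1 * kCompressedReferenceSize;  // 4
constexpr std::size_t kRefsAndArgsOffset = 2 * kCompressedReferenceSize;  // 8

static_assert(kRefsOnlyOffset == 4 && kRefsAndArgsOffset == 8,
              "32-bit slots, hence the lwu/movl loads in the entrypoints");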
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 2d67c8b..07daa7e 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -114,12 +114,12 @@ class TrackingAllocatorImpl : public std::allocator<T> {
// Used internally by STL data structures.
template <class U>
- TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) throw() {
+ TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) noexcept {
UNUSED(alloc);
}
// Used internally by STL data structures.
- TrackingAllocatorImpl() throw() {
+ TrackingAllocatorImpl() noexcept {
static_assert(kTag < kAllocatorTagCount, "kTag must be less than kAllocatorTagCount");
}
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index e6fe6c0..d6c4a54 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -67,6 +67,7 @@ class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
+ ArenaAllocatorAdapterKindImpl(const ArenaAllocatorAdapterKindImpl&) = default;
ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl&) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 3a9de5f..6c33232 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -66,7 +66,7 @@ friend class test_set_name##_##individual_test##_Test
// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
#define DISALLOW_ALLOCATION() \
public: \
- ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
+ NO_RETURN ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
private: \
void* operator new(size_t) = delete
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cdd8e73..33d75d2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -242,7 +242,10 @@ ClassLinker::ClassLinker(InternTable* intern_table)
quick_generic_jni_trampoline_(nullptr),
quick_to_interpreter_bridge_trampoline_(nullptr),
image_pointer_size_(sizeof(void*)) {
- memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
+ CHECK(intern_table_ != nullptr);
+ for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
+ find_array_class_cache_[i] = GcRoot<mirror::Class>(nullptr);
+ }
}
void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path) {
@@ -908,19 +911,20 @@ void ClassLinker::InitFromImage() {
VLOG(startup) << "ClassLinker::InitFromImage exiting";
}
-void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
+ BufferedRootVisitor<128> buffered_visitor(visitor, RootInfo(kRootStickyClass));
for (GcRoot<mirror::Class>& root : class_table_) {
- root.VisitRoot(callback, arg, RootInfo(kRootStickyClass));
+ buffered_visitor.VisitRoot(root);
}
for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
- root.VisitRoot(callback, arg, RootInfo(kRootStickyClass));
+ buffered_visitor.VisitRoot(root);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
- root.VisitRoot(callback, arg, RootInfo(kRootStickyClass));
+ root.VisitRoot(visitor, RootInfo(kRootStickyClass));
mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
if (UNLIKELY(new_ref != old_ref)) {
// Uh oh, GC moved a root in the log. Need to search the class_table and update the
@@ -947,18 +951,18 @@ void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFl
// Keep in sync with InitCallback. Anything we visit, we need to
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
-void ClassLinker::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
- class_roots_.VisitRoot(callback, arg, RootInfo(kRootVMInternal));
- Thread* self = Thread::Current();
+void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
+ class_roots_.VisitRoot(visitor, RootInfo(kRootVMInternal));
+ Thread* const self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
for (GcRoot<mirror::DexCache>& dex_cache : dex_caches_) {
- dex_cache.VisitRoot(callback, arg, RootInfo(kRootVMInternal));
+ dex_cache.VisitRoot(visitor, RootInfo(kRootVMInternal));
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (size_t index : new_dex_cache_roots_) {
- dex_caches_[index].VisitRoot(callback, arg, RootInfo(kRootVMInternal));
+ dex_caches_[index].VisitRoot(visitor, RootInfo(kRootVMInternal));
}
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
@@ -970,11 +974,10 @@ void ClassLinker::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
log_new_dex_caches_roots_ = false;
}
}
- VisitClassRoots(callback, arg, flags);
- array_iftable_.VisitRoot(callback, arg, RootInfo(kRootVMInternal));
- DCHECK(!array_iftable_.IsNull());
+ VisitClassRoots(visitor, flags);
+ array_iftable_.VisitRoot(visitor, RootInfo(kRootVMInternal));
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
- find_array_class_cache_[i].VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
+ find_array_class_cache_[i].VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 69a5337..577fec2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -299,10 +299,10 @@ class ClassLinker {
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 3e727e7..3f6c5a0 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -358,7 +358,8 @@ class ClassLinkerTest : public CommonRuntimeTest {
const char* descriptor = dex.GetTypeDescriptor(type_id);
AssertDexFileClass(class_loader, descriptor);
}
- class_linker_->VisitRoots(TestRootVisitor, nullptr, kVisitRootFlagAllRoots);
+ TestRootVisitor visitor;
+ class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
// Verify the dex cache has resolution methods in all resolved method slots
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
mirror::ObjectArray<mirror::ArtMethod>* resolved_methods = dex_cache->GetResolvedMethods();
@@ -367,9 +368,12 @@ class ClassLinkerTest : public CommonRuntimeTest {
}
}
- static void TestRootVisitor(mirror::Object** root, void*, const RootInfo&) {
- EXPECT_TRUE(*root != nullptr);
- }
+ class TestRootVisitor : public SingleRootVisitor {
+ public:
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE {
+ EXPECT_TRUE(root != nullptr);
+ }
+ };
};
struct CheckOffset {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a767cf0..3f67f9e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -344,16 +344,14 @@ uint32_t Dbg::instrumentation_events_ = 0;
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
-void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
- receiver.VisitRootIfNonNull(callback, arg, root_info); // null for static method call.
- klass.VisitRoot(callback, arg, root_info);
- method.VisitRoot(callback, arg, root_info);
+void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ receiver.VisitRootIfNonNull(visitor, root_info); // null for static method call.
+ klass.VisitRoot(visitor, root_info);
+ method.VisitRoot(visitor, root_info);
}
-void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
- if (method_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&method_), arg, root_info);
- }
+void SingleStepControl::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&method_), root_info);
}
void SingleStepControl::AddDexPc(uint32_t dex_pc) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 4f4a781..62eda62 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -81,7 +81,7 @@ struct DebugInvokeReq {
Mutex lock DEFAULT_MUTEX_ACQUIRED_AFTER;
ConditionVariable cond GUARDED_BY(lock);
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -117,7 +117,7 @@ class SingleStepControl {
return dex_pcs_;
}
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AddDexPc(uint32_t dex_pc);
@@ -648,7 +648,7 @@ class Dbg {
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index d88d262..6a8aaf2 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -27,7 +27,7 @@
namespace art {
-extern "C" void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 70317bb..9644b98 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,14 +24,14 @@
namespace art {
// Deliver an exception that's pending on the thread, helping to set up a callee save frame on the way.
-extern "C" void artDeliverPendingExceptionFromCode(Thread* self)
+extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated call to throw an exception.
-extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
+extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
* exception may be NULL, in which case this routine should
@@ -50,7 +50,7 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread
}
// Called by generated call to throw a NPE exception.
-extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
@@ -60,7 +60,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
}
// Called by generated call to throw an arithmetic divide by zero exception.
-extern "C" void artThrowDivZeroFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
@@ -68,14 +68,14 @@ extern "C" void artThrowDivZeroFromCode(Thread* self)
}
// Called by generated call to throw an array index out of bounds exception.
-extern "C" void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
+extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
-extern "C" void artThrowStackOverflowFromCode(Thread* self)
+extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
@@ -84,15 +84,16 @@ extern "C" void artThrowStackOverflowFromCode(Thread* self)
self->QuickDeliverException();
}
-extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
+extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
}
-extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type,
- Thread* self)
+extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
+ mirror::Class* src_type,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
@@ -100,8 +101,8 @@ extern "C" void artThrowClassCastException(mirror::Class* dest_type, mirror::Cla
self->QuickDeliverException();
}
-extern "C" void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self)
+extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
+ Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index db7a4ef..19d4e1a 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -83,6 +83,9 @@ void ConcurrentCopying::RunPhases() {
LOG(INFO) << "Verifying no from-space refs";
}
VerifyNoFromSpaceReferences();
+ if (kVerboseMode) {
+ LOG(INFO) << "Done verifying no from-space refs";
+ }
CheckEmptyMarkQueue();
}
{
@@ -174,7 +177,7 @@ class ThreadFlipVisitor : public Closure {
thread->RevokeThreadLocalAllocationStack();
}
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- thread->VisitRoots(ConcurrentCopying::ProcessRootCallback, concurrent_copying_);
+ thread->VisitRoots(concurrent_copying_);
concurrent_copying_->GetBarrier().Pass(self);
}
@@ -208,7 +211,7 @@ class FlipCallback : public Closure {
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
CHECK(Runtime::Current()->IsAotCompiler());
TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
- Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
+ Runtime::Current()->VisitTransactionRoots(cc);
}
}
@@ -332,22 +335,20 @@ void ConcurrentCopying::MarkingPhase() {
}
{
TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
- Runtime::Current()->VisitConstantRoots(ProcessRootCallback, this);
+ Runtime::Current()->VisitConstantRoots(this);
}
{
TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
- Runtime::Current()->GetInternTable()->VisitRoots(ProcessRootCallback,
- this, kVisitRootFlagAllRoots);
+ Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
}
{
TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
- Runtime::Current()->GetClassLinker()->VisitRoots(ProcessRootCallback,
- this, kVisitRootFlagAllRoots);
+ Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
}
{
// TODO: don't visit the transaction roots if it's not active.
TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
- Runtime::Current()->VisitNonThreadRoots(ProcessRootCallback, this);
+ Runtime::Current()->VisitNonThreadRoots(this);
}
// Immune spaces.
@@ -486,7 +487,7 @@ inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
// The following visitors are used to verify that there are no
// references to the from-space left after marking.
-class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor {
+class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
public:
explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
@@ -516,16 +517,14 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor {
}
}
- static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector);
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
- visitor(*root);
+ operator()(root);
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
@@ -594,8 +593,8 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
// Roots.
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Runtime::Current()->VisitRoots(
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor::RootCallback, this);
+ ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+ Runtime::Current()->VisitRoots(&ref_visitor);
}
// The to-space.
region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
@@ -808,6 +807,9 @@ class ConcurrentCopyingClearBlackPtrsVisitor {
public:
explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
+#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
+ NO_RETURN
+#endif
void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
DCHECK(obj != nullptr);
@@ -1087,11 +1089,6 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset
}
}
-void ConcurrentCopying::ProcessRootCallback(mirror::Object** root, void* arg,
- const RootInfo& /*root_info*/) {
- reinterpret_cast<ConcurrentCopying*>(arg)->Process(root);
-}
-
// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
public:
@@ -1144,25 +1141,54 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
offset, expected_ref, new_ref));
}
-// Process a root.
-void ConcurrentCopying::Process(mirror::Object** root) {
- mirror::Object* ref = *root;
- if (ref == nullptr || region_space_->IsInToSpace(ref)) {
- return;
- }
- mirror::Object* to_ref = Mark(ref);
- if (to_ref == ref) {
- return;
+// Process some roots.
+void ConcurrentCopying::VisitRoots(
+ mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object** root = roots[i];
+ mirror::Object* ref = *root;
+ if (ref == nullptr || region_space_->IsInToSpace(ref)) {
+ continue;
+ }
+ mirror::Object* to_ref = Mark(ref);
+ if (to_ref == ref) {
+ continue;
+ }
+ Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+ mirror::Object* expected_ref = ref;
+ mirror::Object* new_ref = to_ref;
+ do {
+ if (expected_ref != addr->LoadRelaxed()) {
+ // It was updated by the mutator.
+ break;
+ }
+ } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
}
- Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
- mirror::Object* expected_ref = ref;
- mirror::Object* new_ref = to_ref;
- do {
- if (expected_ref != addr->LoadRelaxed()) {
- // It was updated by the mutator.
- break;
+}
+
+void ConcurrentCopying::VisitRoots(
+ mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* root = roots[i];
+ mirror::Object* ref = root->AsMirrorPtr();
+ if (ref == nullptr || region_space_->IsInToSpace(ref)) {
+ continue;
}
- } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ mirror::Object* to_ref = Mark(ref);
+ if (to_ref == ref) {
+ continue;
+ }
+ auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
+ auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
+ auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
+ do {
+ if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
+ // It was updated by the mutator.
+ break;
+ }
+ } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
+ }
}
// Fill the given memory block with a dummy object. Used to fill in a
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index bbb551a..93de035 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -192,9 +192,11 @@ class ConcurrentCopying : public GarbageCollector {
void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Process(mirror::Object** root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ProcessRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
@@ -230,7 +232,7 @@ class ConcurrentCopying : public GarbageCollector {
bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* GetFwdPtr(mirror::Object* from_ref)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);;
+ void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index ed5207a..c5a8d5d 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -22,6 +22,7 @@
#include "base/timing_logger.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
+#include "gc_root.h"
#include "gc_type.h"
#include <stdint.h>
#include <vector>
@@ -112,7 +113,7 @@ class Iteration {
DISALLOW_COPY_AND_ASSIGN(Iteration);
};
-class GarbageCollector {
+class GarbageCollector : public RootVisitor {
public:
class SCOPED_LOCKABLE ScopedPause {
public:
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index d1ce0bc..8902df8 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -309,19 +309,57 @@ void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::R
reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
}
-void MarkCompact::MarkRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
- reinterpret_cast<MarkCompact*>(arg)->MarkObject(*root);
+void MarkCompact::VisitRoots(
+ mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObject(*roots[i]);
+ }
}
-void MarkCompact::UpdateRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
- mirror::Object* obj = *root;
- mirror::Object* new_obj = reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
- if (obj != new_obj) {
- *root = new_obj;
- DCHECK(new_obj != nullptr);
+void MarkCompact::VisitRoots(
+ mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObject(roots[i]->AsMirrorPtr());
}
}
+class UpdateRootVisitor : public RootVisitor {
+ public:
+ explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object* obj = *roots[i];
+ mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
+ if (obj != new_obj) {
+ *roots[i] = new_obj;
+ DCHECK(new_obj != nullptr);
+ }
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object* obj = roots[i]->AsMirrorPtr();
+ mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
+ if (obj != new_obj) {
+ roots[i]->Assign(new_obj);
+ DCHECK(new_obj != nullptr);
+ }
+ }
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
class UpdateObjectReferencesVisitor {
public:
explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
@@ -339,7 +377,8 @@ void MarkCompact::UpdateReferences() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime* runtime = Runtime::Current();
// Update roots.
- runtime->VisitRoots(UpdateRootCallback, this);
+ UpdateRootVisitor update_root_visitor(this);
+ runtime->VisitRoots(&update_root_visitor);
// Update object references in mod union tables and spaces.
for (const auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune then we need to mark the references to other spaces.
@@ -396,7 +435,7 @@ void MarkCompact::Compact() {
// Marks all objects in the root set.
void MarkCompact::MarkRoots() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->VisitRoots(MarkRootCallback, this);
+ Runtime::Current()->VisitRoots(this);
}
mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 06304bf..4337644 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -114,8 +114,12 @@ class MarkCompact : public GarbageCollector {
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- static void MarkRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -245,6 +249,8 @@ class MarkCompact : public GarbageCollector {
friend class MoveObjectVisitor;
friend class UpdateObjectReferencesVisitor;
friend class UpdateReferenceVisitor;
+ friend class UpdateRootVisitor;
+
DISALLOW_COPY_AND_ASSIGN(MarkCompact);
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ee4e752..79d1034 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -462,42 +462,66 @@ inline void MarkSweep::MarkObject(Object* obj) {
}
}
-void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
- reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
-}
+class VerifyRootMarkedVisitor : public SingleRootVisitor {
+ public:
+ explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
-void MarkSweep::VerifyRootMarked(Object** root, void* arg, const RootInfo& /*root_info*/) {
- CHECK(reinterpret_cast<MarkSweep*>(arg)->IsMarked(*root));
-}
+ void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ CHECK(collector_->IsMarked(root)) << info.ToString();
+ }
+
+ private:
+ MarkSweep* const collector_;
+};
-void MarkSweep::MarkRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
- reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
+void MarkSweep::VisitRoots(mirror::Object*** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObjectNonNull(*roots[i]);
+ }
}
-void MarkSweep::VerifyRootCallback(Object** root, void* arg, const RootInfo& root_info) {
- reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(*root, root_info);
+void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObjectNonNull(roots[i]->AsMirrorPtr());
+ }
}
+class VerifyRootVisitor : public SingleRootVisitor {
+ public:
+ explicit VerifyRootVisitor(MarkSweep* collector) : collector_(collector) { }
+
+ void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ collector_->VerifyRoot(root, info);
+ }
+
+ private:
+ MarkSweep* const collector_;
+};
+
void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) {
// See if the root is on any space bitmap.
if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(ERROR) << "Found invalid root: " << root << " ";
- root_info.Describe(LOG(ERROR));
+ LOG(ERROR) << "Found invalid root: " << root << " " << root_info;
}
}
}
void MarkSweep::VerifyRoots() {
- Runtime::Current()->GetThreadList()->VisitRoots(VerifyRootCallback, this);
+ VerifyRootVisitor visitor(this);
+ Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}
void MarkSweep::MarkRoots(Thread* self) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// If we exclusively hold the mutator lock, all threads must be suspended.
- Runtime::Current()->VisitRoots(MarkRootCallback, this);
+ Runtime::Current()->VisitRoots(this);
RevokeAllThreadLocalAllocationStacks(self);
} else {
MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
@@ -510,13 +534,13 @@ void MarkSweep::MarkRoots(Thread* self) {
void MarkSweep::MarkNonThreadRoots() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
+ Runtime::Current()->VisitNonThreadRoots(this);
}
void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Visit all runtime roots and clear dirty flags.
- Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
+ Runtime::Current()->VisitConcurrentRoots(this, flags);
}
class ScanObjectVisitor {
@@ -932,13 +956,12 @@ void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
void MarkSweep::ReMarkRoots() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- Runtime::Current()->VisitRoots(
- MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
- kVisitRootFlagStopLoggingNewRoots |
- kVisitRootFlagClearRootLog));
+ Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
+ kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
if (kVerifyRootsMarked) {
TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
- Runtime::Current()->VisitRoots(VerifyRootMarked, this);
+ VerifyRootMarkedVisitor visitor(this);
+ Runtime::Current()->VisitRoots(&visitor);
}
}
@@ -968,7 +991,7 @@ void MarkSweep::VerifySystemWeaks() {
Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}
-class CheckpointMarkThreadRoots : public Closure {
+class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
public:
explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
@@ -977,13 +1000,30 @@ class CheckpointMarkThreadRoots : public Closure {
revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
}
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
+ }
+ }
+
virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
ATRACE_BEGIN("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
<< thread->GetState() << " thread " << thread << " self " << self;
- thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
+ thread->VisitRoots(this);
ATRACE_END();
if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3f99e21..31cea17 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -185,11 +185,12 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void MarkRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void VerifyRootMarked(mirror::Object** root, void* arg, const RootInfo& root_info)
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -197,9 +198,6 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void MarkRootParallelCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Marks an object.
void MarkObject(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -250,9 +248,8 @@ class MarkSweep : public GarbageCollector {
// whether or not we care about pauses.
size_t GetThreadCount(bool paused) const;
- static void VerifyRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info);
-
- void VerifyRoot(const mirror::Object* root, const RootInfo& root_info) NO_THREAD_SAFETY_ANALYSIS;
+ void VerifyRoot(const mirror::Object* root, const RootInfo& root_info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -326,18 +323,21 @@ class MarkSweep : public GarbageCollector {
friend class CardScanTask;
friend class CheckBitmapVisitor;
friend class CheckReferenceVisitor;
+ friend class CheckpointMarkThreadRoots;
friend class art::gc::Heap;
+ friend class FifoMarkStackChunk;
friend class MarkObjectVisitor;
+ template<bool kUseFinger> friend class MarkStackTask;
+ friend class MarkSweepMarkObjectSlowPath;
friend class ModUnionCheckReferences;
friend class ModUnionClearCardVisitor;
friend class ModUnionReferenceVisitor;
- friend class ModUnionVisitor;
+ friend class ModUnionScanImageRootVisitor;
friend class ModUnionTableBitmap;
friend class ModUnionTableReferenceCache;
- friend class ModUnionScanImageRootVisitor;
- template<bool kUseFinger> friend class MarkStackTask;
- friend class FifoMarkStackChunk;
- friend class MarkSweepMarkObjectSlowPath;
+ friend class ModUnionVisitor;
+ friend class VerifyRootMarkedVisitor;
+ friend class VerifyRootVisitor;
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index b3d59f2..dbf01d8 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -603,18 +603,29 @@ void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Ref
reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}
-void SemiSpace::MarkRootCallback(Object** root, void* arg, const RootInfo& /*root_info*/) {
- auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
- reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
- if (*root != ref.AsMirrorPtr()) {
- *root = ref.AsMirrorPtr();
+void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ auto* root = roots[i];
+ auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
+ MarkObject(&ref);
+ if (*root != ref.AsMirrorPtr()) {
+ *root = ref.AsMirrorPtr();
+ }
+ }
+}
+
+void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObject(roots[i]);
}
}
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->VisitRoots(MarkRootCallback, this);
+ Runtime::Current()->VisitRoots(this);
}
bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 192fb14..61fbead 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -98,7 +98,7 @@ class SemiSpace : public GarbageCollector {
// Find the default mark bitmap.
void FindDefaultMarkBitmap();
- // Returns the new address of the object.
+ // Updates obj_ptr if the object has moved.
template<bool kPoisonReferences>
void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -133,8 +133,12 @@ class SemiSpace : public GarbageCollector {
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- static void MarkRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index be7344a..b9153c1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -504,7 +504,6 @@ MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_be
// Retry a second time with no specified request begin.
request_begin = nullptr;
}
- return nullptr;
}
bool Heap::MayUseCollector(CollectorType type) const {
@@ -2395,13 +2394,21 @@ void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
gc_complete_cond_->Broadcast(self);
}
-static void RootMatchesObjectVisitor(mirror::Object** root, void* arg,
- const RootInfo& /*root_info*/) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
- if (*root == obj) {
- LOG(INFO) << "Object " << obj << " is a root";
+class RootMatchesObjectVisitor : public SingleRootVisitor {
+ public:
+ explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
+
+ void VisitRoot(mirror::Object* root, const RootInfo& info)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (root == obj_) {
+ LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
+ }
}
-}
+
+ private:
+ const mirror::Object* const obj_;
+};
+
class ScanVisitor {
public:
@@ -2411,7 +2418,7 @@ class ScanVisitor {
};
// Verify a reference from an object.
-class VerifyReferenceVisitor {
+class VerifyReferenceVisitor : public SingleRootVisitor {
public:
explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
@@ -2438,11 +2445,12 @@ class VerifyReferenceVisitor {
return heap_->IsLiveObjectLocked(obj, true, false, true);
}
- static void VerifyRootCallback(mirror::Object** root, void* arg, const RootInfo& root_info)
+ void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
- if (!visitor->VerifyReference(nullptr, *root, MemberOffset(0))) {
- LOG(ERROR) << "Root " << *root << " is dead with type " << PrettyTypeOf(*root)
+ if (root == nullptr) {
+ LOG(ERROR) << "Root is null with info " << root_info.GetType();
+ } else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
+ LOG(ERROR) << "Root " << root << " is dead with type " << PrettyTypeOf(root)
<< " thread_id= " << root_info.GetThreadId() << " root_type= " << root_info.GetType();
}
}
@@ -2534,12 +2542,11 @@ class VerifyReferenceVisitor {
}
// Search to see if any of the roots reference our object.
- void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
- Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
-
+ RootMatchesObjectVisitor visitor1(obj);
+ Runtime::Current()->VisitRoots(&visitor1);
// Search to see if any of the roots reference our reference.
- arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
- Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg);
+ RootMatchesObjectVisitor visitor2(ref);
+ Runtime::Current()->VisitRoots(&visitor2);
}
return false;
}
@@ -2571,6 +2578,13 @@ class VerifyObjectVisitor {
visitor->operator()(obj);
}
+ void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) {
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
+ Runtime::Current()->VisitRoots(&visitor);
+ }
+
size_t GetFailureCount() const {
return fail_count_->LoadSequentiallyConsistent();
}
@@ -2637,7 +2651,7 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) {
// pointing to dead objects if they are not reachable.
VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
// Verify the roots:
- Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRootCallback, &visitor);
+ visitor.VerifyRoots();
if (visitor.GetFailureCount() > 0) {
// Dump mod-union tables.
for (const auto& table_pair : mod_union_tables_) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index b09de6f..9195b06 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -259,7 +259,6 @@ void MallocSpace::ClampGrowthLimit() {
}
GetMemMap()->SetSize(new_capacity);
limit_ = Begin() + new_capacity;
- CHECK(temp_bitmap_.get() == nullptr);
}
} // namespace space
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 1a3c6f5..2ca4b3f 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -67,7 +67,6 @@ HeapTask* TaskProcessor::GetTask(Thread* self) {
}
}
UNREACHABLE();
- return nullptr;
}
void TaskProcessor::UpdateTargetRunTime(Thread* self, HeapTask* task, uint64_t new_target_time) {
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
index a42ec08..57d5689 100644
--- a/runtime/gc_root-inl.h
+++ b/runtime/gc_root-inl.h
@@ -19,6 +19,8 @@
#include "gc_root.h"
+#include <sstream>
+
#include "read_barrier-inl.h"
namespace art {
@@ -26,7 +28,17 @@ namespace art {
template<class MirrorType>
template<ReadBarrierOption kReadBarrierOption>
inline MirrorType* GcRoot<MirrorType>::Read() const {
- return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
+ return down_cast<MirrorType*>(
+ ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&root_));
+}
+template<class MirrorType>
+inline GcRoot<MirrorType>::GcRoot(MirrorType* ref)
+ : root_(mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref)) { }
+
+inline std::string RootInfo::ToString() const {
+ std::ostringstream oss;
+ Describe(oss);
+ return oss.str();
}
} // namespace art
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index c5feda5..2f4da3f 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/mutex.h" // For Locks::mutator_lock_.
+#include "mirror/object_reference.h"
namespace art {
@@ -26,6 +27,9 @@ namespace mirror {
class Object;
} // namespace mirror
+template <size_t kBufferSize>
+class BufferedRootVisitor;
+
enum RootType {
kRootUnknown = 0,
kRootJNIGlobal,
@@ -43,12 +47,14 @@ enum RootType {
};
std::ostream& operator<<(std::ostream& os, const RootType& root_type);
+// Only used by hprof. tid and root_type are only used by hprof.
class RootInfo {
public:
// Thread id 0 is for non thread roots.
explicit RootInfo(RootType type, uint32_t thread_id = 0)
: type_(type), thread_id_(thread_id) {
}
+ RootInfo(const RootInfo&) = default;
virtual ~RootInfo() {
}
RootType GetType() const {
@@ -60,15 +66,64 @@ class RootInfo {
virtual void Describe(std::ostream& os) const {
os << "Type=" << type_ << " thread_id=" << thread_id_;
}
+ std::string ToString() const;
private:
const RootType type_;
const uint32_t thread_id_;
};
-// Returns the new address of the object, returns root if it has not moved. tid and root_type are
-// only used by hprof.
-typedef void (RootCallback)(mirror::Object** root, void* arg, const RootInfo& root_info);
+inline std::ostream& operator<<(std::ostream& os, const RootInfo& root_info) {
+ root_info.Describe(os);
+ return os;
+}
+
+class RootVisitor {
+ public:
+ virtual ~RootVisitor() { }
+
+ // Single root versions, not overridable.
+ ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ VisitRoots(&roots, 1, info);
+ }
+
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (*roots != nullptr) {
+ VisitRoot(roots, info);
+ }
+ }
+
+ virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
+ virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+};
+
+// Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
+// critical.
+class SingleRootVisitor : public RootVisitor {
+ private:
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ VisitRoot(*roots[i], info);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ VisitRoot(roots[i]->AsMirrorPtr(), info);
+ }
+ }
+
+ virtual void VisitRoot(mirror::Object* root, const RootInfo& info) = 0;
+};
template<class MirrorType>
class GcRoot {
@@ -76,37 +131,92 @@ class GcRoot {
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE MirrorType* Read() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoot(RootCallback* callback, void* arg, const RootInfo& info) const {
+ void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsNull());
- callback(reinterpret_cast<mirror::Object**>(&root_), arg, info);
+ mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
+ visitor->VisitRoots(roots, 1u, info);
DCHECK(!IsNull());
}
- void VisitRootIfNonNull(RootCallback* callback, void* arg, const RootInfo& info) const {
+ void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (!IsNull()) {
- VisitRoot(callback, arg, info);
+ VisitRoot(visitor, info);
}
}
- // This is only used by IrtIterator.
- ALWAYS_INLINE MirrorType** AddressWithoutBarrier() {
+ ALWAYS_INLINE mirror::CompressedReference<mirror::Object>* AddressWithoutBarrier() {
return &root_;
}
- bool IsNull() const {
+ ALWAYS_INLINE bool IsNull() const {
// It's safe to null-check it without a read barrier.
- return root_ == nullptr;
+ return root_.IsNull();
}
- ALWAYS_INLINE explicit GcRoot<MirrorType>() : root_(nullptr) {
+ ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ mutable mirror::CompressedReference<mirror::Object> root_;
+
+ template <size_t kBufferSize> friend class BufferedRootVisitor;
+};
+
+// Simple data structure for buffered root visiting to avoid virtual dispatch overhead. Currently
+// only for CompressedReferences, since these are more common than the Object** roots, which are
+// used only for thread-local roots.
+template <size_t kBufferSize>
+class BufferedRootVisitor {
+ public:
+ BufferedRootVisitor(RootVisitor* visitor, const RootInfo& root_info)
+ : visitor_(visitor), root_info_(root_info), buffer_pos_(0) {
+ }
+
+ ~BufferedRootVisitor() {
+ Flush();
+ }
+
+ template <class MirrorType>
+ ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!root.IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ template <class MirrorType>
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ template <class MirrorType>
+ void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ VisitRoot(root.AddressWithoutBarrier());
+ }
+
+ template <class MirrorType>
+ void VisitRoot(mirror::CompressedReference<MirrorType>* root)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
+ Flush();
+ }
+ roots_[buffer_pos_++] = root;
}
- ALWAYS_INLINE explicit GcRoot<MirrorType>(MirrorType* ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : root_(ref) {
+ void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
+ buffer_pos_ = 0;
}
private:
- mutable MirrorType* root_;
+ RootVisitor* const visitor_;
+ RootInfo root_info_;
+ mirror::CompressedReference<mirror::Object>* roots_[kBufferSize];
+ size_t buffer_pos_;
};
} // namespace art
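For reference, a usage sketch of the buffered pattern above (MyRootContainer and its roots_ member are hypothetical; the same shape appears for real in intern_table.cc, reference_table.cc and thread.cc later in this change): roots are queued into the fixed-size buffer and the underlying visitor's virtual VisitRoots() is paid once per batch of up to 128 roots instead of once per root.

void MyRootContainer::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  BufferedRootVisitor<128> buffered_visitor(visitor, root_info);
  for (GcRoot<mirror::Object>& root : roots_) {
    buffered_visitor.VisitRootIfNonNull(root);  // Null roots are skipped, the rest are buffered.
  }
  // The BufferedRootVisitor destructor flushes whatever is still buffered.
}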
diff --git a/runtime/handle.h b/runtime/handle.h
index 6af3220..3ebb2d5 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -70,6 +70,16 @@ class Handle : public ValueObject {
return reinterpret_cast<jobject>(reference_);
}
+ StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ return reference_;
+ }
+
+ ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reference_;
+ }
+
protected:
template<typename S>
explicit Handle(StackReference<S>* reference)
@@ -80,14 +90,6 @@ class Handle : public ValueObject {
: reference_(handle.reference_) {
}
- StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
- return reference_;
- }
- ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return reference_;
- }
-
StackReference<mirror::Object>* reference_;
private:
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index a836578..271312e 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -133,6 +133,8 @@ class HandleWrapper : public MutableHandle<T> {
: MutableHandle<T>(handle), obj_(obj) {
}
+ HandleWrapper(const HandleWrapper&) = default;
+
~HandleWrapper() {
*obj_ = MutableHandle<T>::Get();
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 656569c..d6a6595 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -222,7 +222,7 @@ class EndianOutput {
HandleU4List(values, count);
length_ += count * sizeof(uint32_t);
}
- virtual void UpdateU4(size_t offset ATTRIBUTE_UNUSED, uint32_t new_value ATTRIBUTE_UNUSED) {
+ virtual void UpdateU4(size_t offset, uint32_t new_value ATTRIBUTE_UNUSED) {
DCHECK_LE(offset, length_ - 4);
}
void AddU8List(const uint64_t* values, size_t count) {
@@ -403,9 +403,9 @@ class NetStateEndianOutput FINAL : public EndianOutputBuffered {
JDWP::JdwpNetStateBase* net_state_;
};
-#define __ output->
+#define __ output_->
-class Hprof {
+class Hprof : public SingleRootVisitor {
public:
Hprof(const char* output_filename, int fd, bool direct_to_ddms)
: filename_(output_filename),
@@ -426,9 +426,11 @@ class Hprof {
size_t max_length;
{
EndianOutput count_output;
- ProcessHeap(&count_output, false);
+ output_ = &count_output;
+ ProcessHeap(false);
overall_size = count_output.SumLength();
max_length = count_output.MaxLength();
+ output_ = nullptr;
}
bool okay;
@@ -451,86 +453,70 @@ class Hprof {
}
private:
- struct Env {
- Hprof* hprof;
- EndianOutput* output;
- };
-
- static void RootVisitor(mirror::Object** obj, void* arg, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(arg != nullptr);
- DCHECK(obj != nullptr);
- DCHECK(*obj != nullptr);
- Env* env = reinterpret_cast<Env*>(arg);
- env->hprof->VisitRoot(*obj, root_info, env->output);
- }
-
static void VisitObjectCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
- Env* env = reinterpret_cast<Env*>(arg);
- env->hprof->DumpHeapObject(obj, env->output);
+ reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
}
- void DumpHeapObject(mirror::Object* obj, EndianOutput* output)
+ void DumpHeapObject(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DumpHeapClass(mirror::Class* klass, EndianOutput* output)
+ void DumpHeapClass(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DumpHeapArray(mirror::Array* obj, mirror::Class* klass, EndianOutput* output)
+ void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass, EndianOutput* output)
+ void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ProcessHeap(EndianOutput* output, bool header_first)
+ void ProcessHeap(bool header_first)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Reset current heap and object count.
current_heap_ = HPROF_HEAP_DEFAULT;
objects_in_segment_ = 0;
if (header_first) {
- ProcessHeader(output);
- ProcessBody(output);
+ ProcessHeader();
+ ProcessBody();
} else {
- ProcessBody(output);
- ProcessHeader(output);
+ ProcessBody();
+ ProcessHeader();
}
}
- void ProcessBody(EndianOutput* output) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Runtime* runtime = Runtime::Current();
+ void ProcessBody() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* const runtime = Runtime::Current();
// Walk the roots and the heap.
- output->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
+ output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
- Env env = { this, output };
- runtime->VisitRoots(RootVisitor, &env);
- runtime->VisitImageRoots(RootVisitor, &env);
- runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, &env);
+ runtime->VisitRoots(this);
+ runtime->VisitImageRoots(this);
+ runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this);
- output->StartNewRecord(HPROF_TAG_HEAP_DUMP_END, kHprofTime);
- output->EndRecord();
+ output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_END, kHprofTime);
+ output_->EndRecord();
}
- void ProcessHeader(EndianOutput* output) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void ProcessHeader() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Write the header.
- WriteFixedHeader(output);
+ WriteFixedHeader();
// Write the string and class tables, and any stack traces, to the header.
// (jhat requires that these appear before any of the data in the body that refers to them.)
- WriteStringTable(output);
- WriteClassTable(output);
- WriteStackTraces(output);
- output->EndRecord();
+ WriteStringTable();
+ WriteClassTable();
+ WriteStackTraces();
+ output_->EndRecord();
}
- void WriteClassTable(EndianOutput* output) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t nextSerialNumber = 1;
for (mirror::Class* c : classes_) {
CHECK(c != nullptr);
- output->StartNewRecord(HPROF_TAG_LOAD_CLASS, kHprofTime);
+ output_->StartNewRecord(HPROF_TAG_LOAD_CLASS, kHprofTime);
// LOAD CLASS format:
// U4: class serial number (always > 0)
// ID: class object ID. We use the address of the class object structure as its ID.
@@ -543,12 +529,12 @@ class Hprof {
}
}
- void WriteStringTable(EndianOutput* output) {
+ void WriteStringTable() {
for (const std::pair<std::string, HprofStringId>& p : strings_) {
const std::string& string = p.first;
const size_t id = p.second;
- output->StartNewRecord(HPROF_TAG_STRING, kHprofTime);
+ output_->StartNewRecord(HPROF_TAG_STRING, kHprofTime);
// STRING format:
// ID: ID for this string
@@ -559,24 +545,24 @@ class Hprof {
}
}
- void StartNewHeapDumpSegment(EndianOutput* output) {
+ void StartNewHeapDumpSegment() {
// This flushes the old segment and starts a new one.
- output->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
+ output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime);
objects_in_segment_ = 0;
// Starting a new HEAP_DUMP resets the heap to default.
current_heap_ = HPROF_HEAP_DEFAULT;
}
- void CheckHeapSegmentConstraints(EndianOutput* output) {
- if (objects_in_segment_ >= kMaxObjectsPerSegment || output->Length() >= kMaxBytesPerSegment) {
- StartNewHeapDumpSegment(output);
+ void CheckHeapSegmentConstraints() {
+ if (objects_in_segment_ >= kMaxObjectsPerSegment || output_->Length() >= kMaxBytesPerSegment) {
+ StartNewHeapDumpSegment();
}
}
- void VisitRoot(const mirror::Object* obj, const RootInfo& root_info, EndianOutput* output)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
- uint32_t thread_serial, EndianOutput* output);
+ uint32_t thread_serial);
HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (c != nullptr) {
@@ -611,7 +597,7 @@ class Hprof {
return LookupStringId(PrettyDescriptor(c));
}
- void WriteFixedHeader(EndianOutput* output) {
+ void WriteFixedHeader() {
// Write the file header.
// U1: NUL-terminated magic string.
const char magic[] = "JAVA PROFILE 1.0.3";
@@ -635,9 +621,9 @@ class Hprof {
__ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF));
}
- void WriteStackTraces(EndianOutput* output) {
+ void WriteStackTraces() {
// Write a dummy stack trace record so the analysis tools don't freak out.
- output->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
+ output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
__ AddU4(kHprofNullStackTrace);
__ AddU4(kHprofNullThread);
__ AddU4(0); // no frames
@@ -679,13 +665,15 @@ class Hprof {
bool okay;
{
FileEndianOutput file_output(file.get(), max_length);
- ProcessHeap(&file_output, true);
+ output_ = &file_output;
+ ProcessHeap(true);
okay = !file_output.Errors();
if (okay) {
// Check for expected size.
CHECK_EQ(file_output.SumLength(), overall_size);
}
+ output_ = nullptr;
}
if (okay) {
@@ -721,13 +709,15 @@ class Hprof {
// Prepare the output and send the chunk header.
NetStateEndianOutput net_output(net_state, max_length);
+ output_ = &net_output;
net_output.AddU1List(chunk_header, kChunkHeaderSize);
// Write the dump.
- ProcessHeap(&net_output, true);
+ ProcessHeap(true);
// Check for expected size.
CHECK_EQ(net_output.SumLength(), overall_size + kChunkHeaderSize);
+ output_ = nullptr;
return true;
}
@@ -741,6 +731,8 @@ class Hprof {
uint64_t start_ns_;
+ EndianOutput* output_;
+
HprofHeapId current_heap_; // Which heap we're currently dumping.
size_t objects_in_segment_;
@@ -811,12 +803,12 @@ static HprofBasicType SignatureToBasicTypeAndSize(const char* sig, size_t* size_
// only true when marking the root set or unreachable
// objects. Used to add rootset references to obj.
void Hprof::MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
- uint32_t thread_serial, EndianOutput* output) {
+ uint32_t thread_serial) {
if (heap_tag == 0) {
return;
}
- CheckHeapSegmentConstraints(output);
+ CheckHeapSegmentConstraints();
switch (heap_tag) {
// ID: object ID
@@ -892,7 +884,7 @@ static int StackTraceSerialNumber(const mirror::Object* /*obj*/) {
return kHprofNullStackTrace;
}
-void Hprof::DumpHeapObject(mirror::Object* obj, EndianOutput* output) {
+void Hprof::DumpHeapObject(mirror::Object* obj) {
// Ignore classes that are retired.
if (obj->IsClass() && obj->AsClass()->IsRetired()) {
return;
@@ -908,7 +900,7 @@ void Hprof::DumpHeapObject(mirror::Object* obj, EndianOutput* output) {
heap_type = HPROF_HEAP_IMAGE;
}
}
- CheckHeapSegmentConstraints(output);
+ CheckHeapSegmentConstraints();
if (heap_type != current_heap_) {
HprofStringId nameId;
@@ -945,18 +937,18 @@ void Hprof::DumpHeapObject(mirror::Object* obj, EndianOutput* output) {
// allocated which hasn't been initialized yet.
} else {
if (obj->IsClass()) {
- DumpHeapClass(obj->AsClass(), output);
+ DumpHeapClass(obj->AsClass());
} else if (c->IsArrayClass()) {
- DumpHeapArray(obj->AsArray(), c, output);
+ DumpHeapArray(obj->AsArray(), c);
} else {
- DumpHeapInstanceObject(obj, c, output);
+ DumpHeapInstanceObject(obj, c);
}
}
++objects_in_segment_;
}
-void Hprof::DumpHeapClass(mirror::Class* klass, EndianOutput* output) {
+void Hprof::DumpHeapClass(mirror::Class* klass) {
size_t sFieldCount = klass->NumStaticFields();
if (sFieldCount != 0) {
int byteLength = sFieldCount * sizeof(JValue); // TODO bogus; fields are packed
@@ -1049,7 +1041,7 @@ void Hprof::DumpHeapClass(mirror::Class* klass, EndianOutput* output) {
}
}
-void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass, EndianOutput* output) {
+void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass) {
uint32_t length = obj->GetLength();
if (obj->IsObjectArray()) {
@@ -1089,8 +1081,7 @@ void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass, EndianOutput
}
}
-void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass,
- EndianOutput* output) {
+void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
// obj is an instance object.
__ AddU1(HPROF_INSTANCE_DUMP);
__ AddObjectId(obj);
@@ -1099,7 +1090,7 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass,
// Reserve some space for the length of the instance data, which we won't
// know until we're done writing it.
- size_t size_patch_offset = output->Length();
+ size_t size_patch_offset = output_->Length();
__ AddU4(0x77777777);
// Write the instance data; fields for this class, followed by super class fields,
@@ -1139,10 +1130,10 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass,
}
// Patch the instance field length.
- __ UpdateU4(size_patch_offset, output->Length() - (size_patch_offset + 4));
+ __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
}
-void Hprof::VisitRoot(const mirror::Object* obj, const RootInfo& info, EndianOutput* output) {
+void Hprof::VisitRoot(mirror::Object* obj, const RootInfo& info) {
static const HprofHeapTag xlate[] = {
HPROF_ROOT_UNKNOWN,
HPROF_ROOT_JNI_GLOBAL,
@@ -1164,7 +1155,7 @@ void Hprof::VisitRoot(const mirror::Object* obj, const RootInfo& info, EndianOut
if (obj == nullptr) {
return;
}
- MarkRootObject(obj, 0, xlate[info.GetType()], info.GetThreadId(), output);
+ MarkRootObject(obj, 0, xlate[info.GetType()], info.GetThreadId());
}
// If "direct_to_ddms" is true, the other arguments are ignored, and data is
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 1a3f107..a3aa1de 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -242,16 +242,10 @@ void IndirectReferenceTable::Trim() {
madvise(release_start, release_end - release_start, MADV_DONTNEED);
}
-void IndirectReferenceTable::VisitRoots(RootCallback* callback, void* arg,
- const RootInfo& root_info) {
+void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ BufferedRootVisitor<128> root_visitor(visitor, root_info);
for (auto ref : *this) {
- if (*ref == nullptr) {
- // Need to skip null entries to make it possible to do the
- // non-null check after the call back.
- continue;
- }
- callback(ref, arg, root_info);
- DCHECK(*ref != nullptr);
+ root_visitor.VisitRootIfNonNull(*ref);
}
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 576a604..25b0281 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -218,7 +218,7 @@ class IrtEntry {
uint32_t serial_;
GcRoot<mirror::Object> references_[kIRTPrevCount];
};
-static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uintptr_t),
+static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
"Unexpected sizeof(IrtEntry)");
class IrtIterator {
@@ -233,9 +233,9 @@ class IrtIterator {
return *this;
}
- mirror::Object** operator*() {
+ GcRoot<mirror::Object>* operator*() {
// This does not have a read barrier as this is used to visit roots.
- return table_[i_].GetReference()->AddressWithoutBarrier();
+ return table_[i_].GetReference();
}
bool equals(const IrtIterator& rhs) const {
@@ -320,7 +320,7 @@ class IndirectReferenceTable {
return IrtIterator(table_, Capacity(), Capacity());
}
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 9adb4ac..dea157a 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1077,13 +1077,14 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
}
}
-void Instrumentation::VisitRoots(RootCallback* callback, void* arg) {
+void Instrumentation::VisitRoots(RootVisitor* visitor) {
WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
if (IsDeoptimizedMethodsEmpty()) {
return;
}
+ BufferedRootVisitor<128> roots(visitor, RootInfo(kRootVMInternal));
for (auto pair : deoptimized_methods_) {
- pair.second.VisitRoot(callback, arg, RootInfo(kRootVMInternal));
+ roots.VisitRoot(pair.second);
}
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 8972f3a..77314c6 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -345,7 +345,7 @@ class Instrumentation {
void InstallStubsForMethod(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(deoptimized_methods_lock_);
private:
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 19bfc4e..8e85435 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -53,14 +53,14 @@ void InternTable::DumpForSigQuit(std::ostream& os) const {
os << "Intern table: " << StrongSize() << " strong; " << WeakSize() << " weak\n";
}
-void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+void InternTable::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- strong_interns_.VisitRoots(callback, arg);
+ strong_interns_.VisitRoots(visitor);
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_strong_intern_roots_) {
mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
- root.VisitRoot(callback, arg, RootInfo(kRootInternedString));
+ root.VisitRoot(visitor, RootInfo(kRootInternedString));
mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
if (new_ref != old_ref) {
// The GC moved a root in the log. Need to search the strong interns and update the
@@ -335,12 +335,13 @@ void InternTable::Table::Insert(mirror::String* s) {
post_zygote_table_.Insert(GcRoot<mirror::String>(s));
}
-void InternTable::Table::VisitRoots(RootCallback* callback, void* arg) {
+void InternTable::Table::VisitRoots(RootVisitor* visitor) {
+ BufferedRootVisitor<128> buffered_visitor(visitor, RootInfo(kRootInternedString));
for (auto& intern : pre_zygote_table_) {
- intern.VisitRoot(callback, arg, RootInfo(kRootInternedString));
+ buffered_visitor.VisitRoot(intern);
}
for (auto& intern : post_zygote_table_) {
- intern.VisitRoot(callback, arg, RootInfo(kRootInternedString));
+ buffered_visitor.VisitRoot(intern);
}
}
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 2e31b7e..200a764 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -80,7 +80,7 @@ class InternTable {
  // Total number of weakly live interned strings.
size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_);
- void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) const;
@@ -125,7 +125,7 @@ class InternTable {
void Remove(mirror::String* s)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void VisitRoots(RootCallback* callback, void* arg)
+ void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedCallback* callback, void* arg)
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index b1e4193..9af8102 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -718,7 +718,7 @@ static void UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
result->SetI(args[0]);
}
-static void UnstartedJNIObjectInternalClone(Thread* self ATTRIBUTE_UNUSED,
+static void UnstartedJNIObjectInternalClone(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -727,7 +727,7 @@ static void UnstartedJNIObjectInternalClone(Thread* self ATTRIBUTE_UNUSED,
result->SetL(receiver->Clone(self));
}
-static void UnstartedJNIObjectNotifyAll(Thread* self ATTRIBUTE_UNUSED,
+static void UnstartedJNIObjectNotifyAll(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 09bfbf3..b795d72 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -748,19 +748,18 @@ void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), weak_globals_lock_);
- for (mirror::Object** entry : weak_globals_) {
- // Since this is called by the GC, we don't need a read barrier.
- mirror::Object* obj = *entry;
- if (obj == nullptr) {
- // Need to skip null here to distinguish between null entries
- // and cleared weak ref entries.
- continue;
- }
- mirror::Object* new_obj = callback(obj, arg);
- if (new_obj == nullptr) {
- new_obj = Runtime::Current()->GetClearedJniWeakGlobal();
+ Runtime* const runtime = Runtime::Current();
+ for (auto* entry : weak_globals_) {
+ // Need to skip null here to distinguish between null entries and cleared weak ref entries.
+ if (!entry->IsNull()) {
+ // Since this is called by the GC, we don't need a read barrier.
+ mirror::Object* obj = entry->Read<kWithoutReadBarrier>();
+ mirror::Object* new_obj = callback(obj, arg);
+ if (new_obj == nullptr) {
+ new_obj = runtime->GetClearedJniWeakGlobal();
+ }
+ *entry = GcRoot<mirror::Object>(new_obj);
}
- *entry = new_obj;
}
}
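The loop above also documents the sweeping contract: IsMarkedCallback returns the possibly-moved address of a live object, or nullptr for a dead one, and dead entries are replaced with the cleared-weak-global sentinel. A sketch of the same shape for a hypothetical table of weak GcRoots (MyWeakTable and entries_ are illustrative names only):

void MyWeakTable::SweepWeaks(IsMarkedCallback* callback, void* arg)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* const cleared = Runtime::Current()->GetClearedJniWeakGlobal();
  for (GcRoot<mirror::Object>& entry : entries_) {
    if (!entry.IsNull()) {  // Keep nulls distinguishable from cleared weak references.
      mirror::Object* old_obj = entry.Read<kWithoutReadBarrier>();
      mirror::Object* new_obj = callback(old_obj, arg);
      entry = GcRoot<mirror::Object>(new_obj != nullptr ? new_obj : cleared);
    }
  }
}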
@@ -769,10 +768,10 @@ void JavaVMExt::TrimGlobals() {
globals_.Trim();
}
-void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
+void JavaVMExt::VisitRoots(RootVisitor* visitor) {
Thread* self = Thread::Current();
ReaderMutexLock mu(self, globals_lock_);
- globals_.VisitRoots(callback, arg, RootInfo(kRootJNIGlobal));
+ globals_.VisitRoots(visitor, RootInfo(kRootJNIGlobal));
// The weak_globals table is visited by the GC itself (because it mutates the table).
}
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 037fbe5..deec6a9 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -103,7 +103,7 @@ class JavaVMExt : public JavaVM {
bool SetCheckJniEnabled(bool enabled);
- void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DisallowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7f04992..6452f31 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -196,8 +196,8 @@ inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_c
}
template<class T>
-inline void PrimitiveArray<T>::VisitRoots(RootCallback* callback, void* arg) {
- array_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+inline void PrimitiveArray<T>::VisitRoots(RootVisitor* visitor) {
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
template<typename T>
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 83e3688..115fcf2 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -166,8 +166,7 @@ class MANAGED PrimitiveArray : public Array {
array_class_ = GcRoot<Class>(nullptr);
}
- static void VisitRoots(RootCallback* callback, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
static GcRoot<Class> array_class_;
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 4c36753..83602d4 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -55,8 +55,8 @@ void ArtField::SetOffset(MemberOffset num_bytes) {
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), num_bytes.Uint32Value());
}
-void ArtField::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_reflect_ArtField_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void ArtField::VisitRoots(RootVisitor* visitor) {
+ java_lang_reflect_ArtField_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
// TODO: we could speed up the search if fields are ordered by offsets.
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index d640165..9d95cb9 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -138,7 +138,7 @@ class MANAGED ArtField FINAL : public Object {
static void SetClass(Class* java_lang_reflect_ArtField);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index c1f7594..edbbb4a 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -61,8 +61,8 @@ ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnabl
}
-void ArtMethod::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_reflect_ArtMethod_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void ArtMethod::VisitRoots(RootVisitor* visitor) {
+ java_lang_reflect_ArtMethod_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
mirror::String* ArtMethod::GetNameAsString(Thread* self) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 82e5d00..22481ce 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -488,7 +488,7 @@ class MANAGED ArtMethod FINAL : public Object {
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 29851a9..8fb8147 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -51,8 +51,8 @@ void Class::ResetClass() {
java_lang_Class_ = GcRoot<Class>(nullptr);
}
-void Class::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_Class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void Class::VisitRoots(RootVisitor* visitor) {
+ java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
void Class::SetStatus(Handle<Class> h_this, Status new_status, Thread* self) {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2dff383..b82a58f 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -971,7 +971,7 @@ class MANAGED Class FINAL : public Object {
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// When class is verified, set the kAccPreverified flag on each method.
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 1724682..82cc26e 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -48,9 +48,9 @@ void Field::ResetArrayClass() {
array_class_ = GcRoot<Class>(nullptr);
}
-void Field::VisitRoots(RootCallback* callback, void* arg) {
- static_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
- array_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void Field::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
ArtField* Field::GetArtField() {
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index f54340a..cea06f5 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -89,7 +89,7 @@ class MANAGED Field : public AccessibleObject {
static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index b730670..cfc8549 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -90,6 +90,9 @@ class MANAGED LOCKABLE Object {
void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
+ NO_RETURN
+#endif
void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index b63d13d..5edda8b 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -43,6 +43,11 @@ class MANAGED ObjectReference {
void Clear() {
reference_ = 0;
+ DCHECK(IsNull());
+ }
+
+ bool IsNull() const {
+ return reference_ == 0;
}
uint32_t AsVRegValue() const {
@@ -86,6 +91,23 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
+// Standard compressed reference used in the runtime. Used for StackReference and GC roots.
+template<class MirrorType>
+class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
+ public:
+ CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : mirror::ObjectReference<false, MirrorType>(nullptr) {}
+
+ static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return CompressedReference<MirrorType>(p);
+ }
+
+ private:
+ CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : mirror::ObjectReference<false, MirrorType>(p) {}
+};
+
} // namespace mirror
} // namespace art
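A round-trip sketch for the new CompressedReference (CompressedReferenceRoundTrip is an illustrative helper, not part of this change): the reference is stored as a 32-bit value even on 64-bit targets, which is what lets GcRoot hold one directly, lets StackReference become a thin subclass (see stack.h below), and lets the IrtEntry static_assert above switch to uint32_t-sized slots.

static void CompressedReferenceRoundTrip(mirror::Object* obj)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  auto ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj);
  DCHECK_EQ(ref.AsMirrorPtr(), obj);        // Decompression recovers the original pointer.
  DCHECK_EQ(ref.IsNull(), obj == nullptr);  // Null checks need no read barrier.
  static_assert(sizeof(ref) == sizeof(uint32_t), "Compressed references occupy 32 bits");
}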
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 35130e8..70bcf92 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -16,6 +16,9 @@
#include "reference.h"
+#include "mirror/art_method.h"
+#include "gc_root-inl.h"
+
namespace art {
namespace mirror {
@@ -32,8 +35,8 @@ void Reference::ResetClass() {
java_lang_ref_Reference_ = GcRoot<Class>(nullptr);
}
-void Reference::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_ref_Reference_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void Reference::VisitRoots(RootVisitor* visitor) {
+ java_lang_ref_Reference_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
} // namespace mirror
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 69ef69c..c11d79d 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -100,7 +100,7 @@ class MANAGED Reference : public Object {
}
static void SetClass(Class* klass);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg);
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
// Note: This avoids a read barrier, it should only be used by the GC.
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index c2a67e8..ec2b495 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -67,8 +67,8 @@ void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> meth
line_number);
}
-void StackTraceElement::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_StackTraceElement_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void StackTraceElement::VisitRoots(RootVisitor* visitor) {
+ java_lang_StackTraceElement_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 70acd1c..dc7131e 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -54,7 +54,7 @@ class MANAGED StackTraceElement FINAL : public Object {
static void SetClass(Class* java_lang_StackTraceElement);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!java_lang_StackTraceElement_.IsNull());
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index e7c88c5..bd6a63c 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -253,8 +253,8 @@ int32_t String::CompareTo(String* rhs) {
return countDiff;
}
-void String::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_String_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void String::VisitRoots(RootVisitor* visitor) {
+ java_lang_String_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
} // namespace mirror
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 6c22b9b..0670d0b 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -127,7 +127,7 @@ class MANAGED String FINAL : public Object {
static void SetClass(Class* java_lang_String);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: Make this private. It's only used on ObjectTest at the moment.
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index fdfeb47..b564649 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -144,8 +144,8 @@ void Throwable::ResetClass() {
java_lang_Throwable_ = GcRoot<Class>(nullptr);
}
-void Throwable::VisitRoots(RootCallback* callback, void* arg) {
- java_lang_Throwable_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
+void Throwable::VisitRoots(RootVisitor* visitor) {
+ java_lang_Throwable_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
} // namespace mirror
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index c22475b..9cc0b6f 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -55,7 +55,7 @@ class MANAGED Throwable : public Object {
static void SetClass(Class* java_lang_Throwable);
static void ResetClass();
- static void VisitRoots(RootCallback* callback, void* arg)
+ static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 6e3f1bc..760038a 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -248,13 +248,20 @@ static void VMRuntime_runHeapTasks(JNIEnv* env, jobject) {
typedef std::map<std::string, mirror::String*> StringTable;
-static void PreloadDexCachesStringsCallback(mirror::Object** root, void* arg,
- const RootInfo& /*root_info*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StringTable& table = *reinterpret_cast<StringTable*>(arg);
- mirror::String* string = const_cast<mirror::Object*>(*root)->AsString();
- table[string->ToModifiedUtf8()] = string;
-}
+class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
+ public:
+ explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) {
+ }
+
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* string = root->AsString();
+ table_->operator[](string->ToModifiedUtf8()) = string;
+ }
+
+ private:
+ StringTable* const table_;
+};
// Based on ClassLinker::ResolveString.
static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
@@ -469,8 +476,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
  // We use a std::map to avoid heap-allocating StringObjects to look up in gDvm.literalStrings
StringTable strings;
if (kPreloadDexCachesStrings) {
- runtime->GetInternTable()->VisitRoots(PreloadDexCachesStringsCallback, &strings,
- kVisitRootFlagAllRoots);
+ PreloadDexCachesStringsVisitor visitor(&strings);
+ runtime->GetInternTable()->VisitRoots(&visitor, kVisitRootFlagAllRoots);
}
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
diff --git a/runtime/oat.h b/runtime/oat.h
index 120de6d..de95fef 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -156,6 +156,8 @@ class PACKED(4) OatMethodOffsets {
~OatMethodOffsets();
+ OatMethodOffsets& operator=(const OatMethodOffsets&) = default;
+
uint32_t code_offset_;
};
@@ -169,6 +171,8 @@ class PACKED(4) OatQuickMethodHeader {
~OatQuickMethodHeader();
+ OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
+
// The offset in bytes from the start of the mapping table to the end of the header.
uint32_t mapping_table_offset_;
// The offset in bytes from the start of the vmap table to the end of the header.
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 2b9ef9d..73a8c8e 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -134,8 +134,11 @@ class OatFile FINAL {
OatMethod(const uint8_t* base, const uint32_t code_offset)
: begin_(base), code_offset_(code_offset) {
}
+ OatMethod(const OatMethod&) = default;
~OatMethod() {}
+ OatMethod& operator=(const OatMethod&) = default;
+
// A representation of an invalid OatMethod, used when an OatMethod or OatClass can't be found.
// See ClassLinker::FindOatMethodFor.
static const OatMethod Invalid() {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 89779bc..c23f744 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -413,7 +413,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
}
UNREACHABLE();
- return false;
}
using M = RuntimeArgumentMap;
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 8cccec8..7ee4118 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -38,7 +38,7 @@ class QuickExceptionHandler {
QuickExceptionHandler(Thread* self, bool is_deoptimization)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ~QuickExceptionHandler() {
+ NO_RETURN ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
UNREACHABLE();
}
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index c74fded..5631ff4 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -111,6 +111,48 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
}
}
+// TODO: Reduce copy paste
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
+inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
+ MirrorType* ref = root->AsMirrorPtr();
+ const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+ if (with_read_barrier && kUseBakerReadBarrier) {
+ if (kMaybeDuringStartup && IsDuringStartup()) {
+ // During startup, the heap may not be initialized yet. Just
+ // return the given ref.
+ return ref;
+ }
+ // TODO: separate the read barrier code from the collector code more.
+ if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
+ ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ }
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+ return ref;
+ } else if (with_read_barrier && kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else if (with_read_barrier && kUseTableLookupReadBarrier) {
+ if (kMaybeDuringStartup && IsDuringStartup()) {
+ // During startup, the heap may not be initialized yet. Just
+ // return the given ref.
+ return ref;
+ }
+ if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+ auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
+ ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
+      // Update the field atomically. This may fail if the mutator updates before us, but that's ok.
+ auto* atomic_root =
+ reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
+ atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+ }
+ AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+ return ref;
+ } else {
+ return ref;
+ }
+}
+
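A sketch of how a caller is expected to use the overload above (ReadCompressedRoot is a hypothetical wrapper, not part of this change): the table-lookup branch only updates the slot opportunistically with a compare-and-swap, so the root may still hold the old address afterwards and callers must rely on the returned pointer.

template <class MirrorType>
inline MirrorType* ReadCompressedRoot(mirror::CompressedReference<MirrorType>* root)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  MirrorType* ref = ReadBarrier::BarrierForRoot<MirrorType, kWithReadBarrier>(root);
  // Do not re-read root->AsMirrorPtr() here: the slot may legitimately still hold the
  // from-space address if the CompareExchange lost a race with the mutator.
  return ref;
}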
inline bool ReadBarrier::IsDuringStartup() {
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap == nullptr) {
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 474b46f..471b37c 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -20,6 +20,7 @@
#include "base/mutex.h"
#include "base/macros.h"
#include "jni.h"
+#include "mirror/object_reference.h"
#include "offsets.h"
#include "read_barrier_c.h"
@@ -58,6 +59,13 @@ class ReadBarrier {
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // It's up to the implementation whether the given root gets updated in place,
+  // but the return value must be an updated reference.
+ template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kMaybeDuringStartup = false>
+ ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static bool IsDuringStartup();
// Without the holder object.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 357d454..ac36447 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -237,9 +237,10 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
DumpSummaryLine(os, prev, GetElementCount(prev), identical, equiv);
}
-void ReferenceTable::VisitRoots(RootCallback* visitor, void* arg, const RootInfo& root_info) {
+void ReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ BufferedRootVisitor<128> buffered_visitor(visitor, root_info);
for (GcRoot<mirror::Object>& root : entries_) {
- root.VisitRoot(visitor, arg, root_info);
+ buffered_visitor.VisitRoot(root);
}
}
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 22cf1cd..94f16b6 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -49,7 +49,8 @@ class ReferenceTable {
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* visitor, void* arg, const RootInfo& root_info);
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
typedef std::vector<GcRoot<mirror::Object>,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 497123b..1cd0a96 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1291,67 +1291,67 @@ mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
return ncdfe;
}
-void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
+void Runtime::VisitConstantRoots(RootVisitor* visitor) {
// Visit the classes held as static in mirror classes, these can be visited concurrently and only
// need to be visited once per GC since they never change.
- mirror::ArtField::VisitRoots(callback, arg);
- mirror::ArtMethod::VisitRoots(callback, arg);
- mirror::Class::VisitRoots(callback, arg);
- mirror::Reference::VisitRoots(callback, arg);
- mirror::StackTraceElement::VisitRoots(callback, arg);
- mirror::String::VisitRoots(callback, arg);
- mirror::Throwable::VisitRoots(callback, arg);
- mirror::Field::VisitRoots(callback, arg);
+ mirror::ArtField::VisitRoots(visitor);
+ mirror::ArtMethod::VisitRoots(visitor);
+ mirror::Class::VisitRoots(visitor);
+ mirror::Reference::VisitRoots(visitor);
+ mirror::StackTraceElement::VisitRoots(visitor);
+ mirror::String::VisitRoots(visitor);
+ mirror::Throwable::VisitRoots(visitor);
+ mirror::Field::VisitRoots(visitor);
// Visit all the primitive array types classes.
- mirror::PrimitiveArray<uint8_t>::VisitRoots(callback, arg); // BooleanArray
- mirror::PrimitiveArray<int8_t>::VisitRoots(callback, arg); // ByteArray
- mirror::PrimitiveArray<uint16_t>::VisitRoots(callback, arg); // CharArray
- mirror::PrimitiveArray<double>::VisitRoots(callback, arg); // DoubleArray
- mirror::PrimitiveArray<float>::VisitRoots(callback, arg); // FloatArray
- mirror::PrimitiveArray<int32_t>::VisitRoots(callback, arg); // IntArray
- mirror::PrimitiveArray<int64_t>::VisitRoots(callback, arg); // LongArray
- mirror::PrimitiveArray<int16_t>::VisitRoots(callback, arg); // ShortArray
-}
-
-void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
- intern_table_->VisitRoots(callback, arg, flags);
- class_linker_->VisitRoots(callback, arg, flags);
+ mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor); // BooleanArray
+ mirror::PrimitiveArray<int8_t>::VisitRoots(visitor); // ByteArray
+ mirror::PrimitiveArray<uint16_t>::VisitRoots(visitor); // CharArray
+ mirror::PrimitiveArray<double>::VisitRoots(visitor); // DoubleArray
+ mirror::PrimitiveArray<float>::VisitRoots(visitor); // FloatArray
+ mirror::PrimitiveArray<int32_t>::VisitRoots(visitor); // IntArray
+ mirror::PrimitiveArray<int64_t>::VisitRoots(visitor); // LongArray
+ mirror::PrimitiveArray<int16_t>::VisitRoots(visitor); // ShortArray
+}
+
+void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
+ intern_table_->VisitRoots(visitor, flags);
+ class_linker_->VisitRoots(visitor, flags);
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
- VisitConstantRoots(callback, arg);
+ VisitConstantRoots(visitor);
}
}
-void Runtime::VisitTransactionRoots(RootCallback* callback, void* arg) {
+void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
if (preinitialization_transaction_ != nullptr) {
- preinitialization_transaction_->VisitRoots(callback, arg);
+ preinitialization_transaction_->VisitRoots(visitor);
}
}
-void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) {
- java_vm_->VisitRoots(callback, arg);
- sentinel_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
- pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
- resolution_method_.VisitRoot(callback, arg, RootInfo(kRootVMInternal));
- pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
- imt_conflict_method_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
- imt_unimplemented_method_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
- default_imt_.VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
+void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
+ java_vm_->VisitRoots(visitor);
+ sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ resolution_method_.VisitRoot(visitor, RootInfo(kRootVMInternal));
+ pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ imt_conflict_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ imt_unimplemented_method_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ default_imt_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- callee_save_methods_[i].VisitRootIfNonNull(callback, arg, RootInfo(kRootVMInternal));
+ callee_save_methods_[i].VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
}
- verifier::MethodVerifier::VisitStaticRoots(callback, arg);
- VisitTransactionRoots(callback, arg);
- instrumentation_.VisitRoots(callback, arg);
+ verifier::MethodVerifier::VisitStaticRoots(visitor);
+ VisitTransactionRoots(visitor);
+ instrumentation_.VisitRoots(visitor);
}
-void Runtime::VisitNonConcurrentRoots(RootCallback* callback, void* arg) {
- thread_list_->VisitRoots(callback, arg);
- VisitNonThreadRoots(callback, arg);
+void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor) {
+ thread_list_->VisitRoots(visitor);
+ VisitNonThreadRoots(visitor);
}
-void Runtime::VisitThreadRoots(RootCallback* callback, void* arg) {
- thread_list_->VisitRoots(callback, arg);
+void Runtime::VisitThreadRoots(RootVisitor* visitor) {
+ thread_list_->VisitRoots(visitor);
}
size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
@@ -1359,12 +1359,12 @@ size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_call
return thread_list_->FlipThreadRoots(thread_flip_visitor, flip_callback, collector);
}
-void Runtime::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
- VisitNonConcurrentRoots(callback, arg);
- VisitConcurrentRoots(callback, arg, flags);
+void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
+ VisitNonConcurrentRoots(visitor);
+ VisitConcurrentRoots(visitor, flags);
}
-void Runtime::VisitImageRoots(RootCallback* callback, void* arg) {
+void Runtime::VisitImageRoots(RootVisitor* visitor) {
for (auto* space : GetHeap()->GetContinuousSpaces()) {
if (space->IsImageSpace()) {
auto* image_space = space->AsImageSpace();
@@ -1373,7 +1373,7 @@ void Runtime::VisitImageRoots(RootCallback* callback, void* arg) {
auto* obj = image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i));
if (obj != nullptr) {
auto* after_obj = obj;
- callback(&after_obj, arg, RootInfo(kRootStickyClass));
+ visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
CHECK_EQ(after_obj, obj);
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index af6abbd..baa4d18 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -296,27 +296,27 @@ class Runtime {
  // Visit all the roots. The flags select which categories of roots are visited (see
  // VisitRootFlags).
- void VisitRoots(RootCallback* visitor, void* arg, VisitRootFlags flags = kVisitRootFlagAllRoots)
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Visit image roots, only used for hprof since the GC uses the image space mod union table
// instead.
- void VisitImageRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Visit all of the roots we can safely visit concurrently.
- void VisitConcurrentRoots(RootCallback* visitor, void* arg,
+ void VisitConcurrentRoots(RootVisitor* visitor,
VisitRootFlags flags = kVisitRootFlagAllRoots)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Visit all of the non thread roots, we can do this with mutators unpaused.
- void VisitNonThreadRoots(RootCallback* visitor, void* arg)
+ void VisitNonThreadRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitTransactionRoots(RootCallback* visitor, void* arg)
+ void VisitTransactionRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Visit all of the thread roots.
- void VisitThreadRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Flip thread roots from from-space refs to to-space refs.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
@@ -324,7 +324,7 @@ class Runtime {
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Visit all other roots which must be done with mutators suspended.
- void VisitNonConcurrentRoots(RootCallback* visitor, void* arg)
+ void VisitNonConcurrentRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Sweep system weaks; a system weak is deleted if the visitor returns nullptr. Otherwise, the
@@ -334,7 +334,7 @@ class Runtime {
// Constant roots are the roots which never change after the runtime is initialized, they only
// need to be visited once per GC cycle.
- void VisitConstantRoots(RootCallback* callback, void* arg)
+ void VisitConstantRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index f9d81dc..402c7e9 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -44,6 +44,7 @@ class SafeMap {
typedef typename ::std::map<K, V, Comparator, Allocator>::value_type value_type;
SafeMap() = default;
+ SafeMap(const SafeMap&) = default;
explicit SafeMap(const key_compare& cmp, const allocator_type& allocator = allocator_type())
: map_(cmp, allocator) {
}
diff --git a/runtime/stack.h b/runtime/stack.h
index aab54ba..fbb0aa4 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -59,19 +59,7 @@ std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
-class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType> {
- public:
- StackReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : mirror::ObjectReference<false, MirrorType>(nullptr) {}
-
- static StackReference<MirrorType> FromMirrorPtr(MirrorType* p)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return StackReference<MirrorType>(p);
- }
-
- private:
- StackReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : mirror::ObjectReference<false, MirrorType>(p) {}
+class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
};
// ShadowFrame has 2 possible layouts:
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8a6422d..79d2b13 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1196,26 +1196,37 @@ void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
}
}
-static void MonitorExitVisitor(mirror::Object** object, void* arg, const RootInfo& /*root_info*/)
- NO_THREAD_SAFETY_ANALYSIS {
- Thread* self = reinterpret_cast<Thread*>(arg);
- mirror::Object* entered_monitor = *object;
- if (self->HoldsLock(entered_monitor)) {
- LOG(WARNING) << "Calling MonitorExit on object "
- << object << " (" << PrettyTypeOf(entered_monitor) << ")"
- << " left locked by native thread "
- << *Thread::Current() << " which is detaching";
- entered_monitor->MonitorExit(self);
+class MonitorExitVisitor : public SingleRootVisitor {
+ public:
+ explicit MonitorExitVisitor(Thread* self) : self_(self) { }
+
+ // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
+ void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
+ if (self_->HoldsLock(entered_monitor)) {
+ LOG(WARNING) << "Calling MonitorExit on object "
+ << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
+ << " left locked by native thread "
+ << *Thread::Current() << " which is detaching";
+ entered_monitor->MonitorExit(self_);
+ }
}
-}
+
+ private:
+ Thread* const self_;
+};
void Thread::Destroy() {
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
if (tlsPtr_.jni_env != nullptr) {
- // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
- tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, RootInfo(kRootVMInternal));
+ {
+ ScopedObjectAccess soa(self);
+ MonitorExitVisitor visitor(self);
+ // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
+ tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
+ }
// Release locally held global references; releasing them may require the mutator lock.
if (tlsPtr_.jpeer != nullptr) {
// If pthread_create fails we don't have a jni env here.
@@ -1373,18 +1384,11 @@ bool Thread::HandleScopeContains(jobject obj) const {
return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
}
-void Thread::HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
+void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
+ BufferedRootVisitor<128> buffered_visitor(visitor, RootInfo(kRootNativeStack, thread_id));
for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
- size_t num_refs = cur->NumberOfReferences();
- for (size_t j = 0; j < num_refs; ++j) {
- mirror::Object* object = cur->GetReference(j);
- if (object != nullptr) {
- mirror::Object* old_obj = object;
- visitor(&object, arg, RootInfo(kRootNativeStack, thread_id));
- if (old_obj != object) {
- cur->SetReference(j, object);
- }
- }
+ for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
+ buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
}
}
}
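
BufferedRootVisitor<128> batches root slots and forwards them to the wrapped RootVisitor in chunks, so the per-handle virtual call of the old scheme becomes one call per batch. Its real implementation ships with this patch's gc_root changes; the idea is roughly the following sketch (names and details are illustrative):

    template <size_t kBufferSize>
    class BufferedRootVisitorSketch {
     public:
      BufferedRootVisitorSketch(RootVisitor* visitor, const RootInfo& info)
          : visitor_(visitor), info_(info), count_(0u) {}
      ~BufferedRootVisitorSketch() { Flush(); }  // push out whatever is still buffered

      void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) {
        if (!root->IsNull()) {
          if (count_ == kBufferSize) {
            Flush();
          }
          roots_[count_++] = root;
        }
      }

      void Flush() {
        if (count_ != 0u) {
          // Assumes a bulk CompressedReference overload of RootVisitor::VisitRoots().
          visitor_->VisitRoots(roots_, count_, info_);
          count_ = 0u;
        }
      }

     private:
      RootVisitor* const visitor_;
      const RootInfo info_;
      mirror::CompressedReference<mirror::Object>* roots_[kBufferSize];
      size_t count_;
    };
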
@@ -2084,7 +2088,7 @@ bool Thread::HoldsLock(mirror::Object* object) const {
template <typename RootVisitor>
class ReferenceMapVisitor : public StackVisitor {
public:
- ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
+ ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(thread, context), visitor_(visitor) {}
@@ -2248,55 +2252,50 @@ class ReferenceMapVisitor : public StackVisitor {
}
// Visitor for when we visit a root.
- const RootVisitor& visitor_;
+ RootVisitor& visitor_;
};
class RootCallbackVisitor {
public:
- RootCallbackVisitor(RootCallback* callback, void* arg, uint32_t tid)
- : callback_(callback), arg_(arg), tid_(tid) {}
+ RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
- void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const {
- callback_(obj, arg_, JavaFrameRootInfo(tid_, stack_visitor, vreg));
+ void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
}
private:
- RootCallback* const callback_;
- void* const arg_;
+ RootVisitor* const visitor_;
const uint32_t tid_;
};
-void Thread::VisitRoots(RootCallback* visitor, void* arg) {
- uint32_t thread_id = GetThreadId();
- if (tlsPtr_.opeer != nullptr) {
- visitor(&tlsPtr_.opeer, arg, RootInfo(kRootThreadObject, thread_id));
- }
+void Thread::VisitRoots(RootVisitor* visitor) {
+ const uint32_t thread_id = GetThreadId();
+ visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
- visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg,
- RootInfo(kRootNativeStack, thread_id));
- }
- if (tlsPtr_.monitor_enter_object != nullptr) {
- visitor(&tlsPtr_.monitor_enter_object, arg, RootInfo(kRootNativeStack, thread_id));
+ visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
+ RootInfo(kRootNativeStack, thread_id));
}
- tlsPtr_.jni_env->locals.VisitRoots(visitor, arg, RootInfo(kRootJNILocal, thread_id));
- tlsPtr_.jni_env->monitors.VisitRoots(visitor, arg, RootInfo(kRootJNIMonitor, thread_id));
- HandleScopeVisitRoots(visitor, arg, thread_id);
+ visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
+ tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
+ tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
+ HandleScopeVisitRoots(visitor, thread_id);
if (tlsPtr_.debug_invoke_req != nullptr) {
- tlsPtr_.debug_invoke_req->VisitRoots(visitor, arg, RootInfo(kRootDebugger, thread_id));
+ tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
}
if (tlsPtr_.single_step_control != nullptr) {
- tlsPtr_.single_step_control->VisitRoots(visitor, arg, RootInfo(kRootDebugger, thread_id));
+ tlsPtr_.single_step_control->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
}
if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
- RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
- ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
+ RootCallbackVisitor visitor_to_callback(visitor, thread_id);
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
shadow_frame = shadow_frame->GetLink()) {
mapper.VisitShadowFrame(shadow_frame);
}
}
if (tlsPtr_.shadow_frame_under_construction != nullptr) {
- RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
+ RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
for (ShadowFrame* shadow_frame = tlsPtr_.shadow_frame_under_construction;
shadow_frame != nullptr;
@@ -2305,33 +2304,34 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
}
}
if (tlsPtr_.method_verifier != nullptr) {
- tlsPtr_.method_verifier->VisitRoots(visitor, arg, RootInfo(kRootNativeStack, thread_id));
+ tlsPtr_.method_verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
}
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
- RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
+ RootCallbackVisitor visitor_to_callback(visitor, thread_id);
ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
mapper.WalkStack();
ReleaseLongJumpContext(context);
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
- if (frame.this_object_ != nullptr) {
- visitor(&frame.this_object_, arg, RootInfo(kRootVMInternal, thread_id));
- }
- DCHECK(frame.method_ != nullptr);
- visitor(reinterpret_cast<mirror::Object**>(&frame.method_), arg,
- RootInfo(kRootVMInternal, thread_id));
+ visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+ visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
+ RootInfo(kRootVMInternal, thread_id));
}
}
-static void VerifyRoot(mirror::Object** root, void* /*arg*/, const RootInfo& /*root_info*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- VerifyObject(*root);
-}
+class VerifyRootVisitor : public SingleRootVisitor {
+ public:
+ void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ VerifyObject(root);
+ }
+};
void Thread::VerifyStackImpl() {
+ VerifyRootVisitor visitor;
std::unique_ptr<Context> context(Context::Create());
- RootCallbackVisitor visitorToCallback(VerifyRoot, Runtime::Current()->GetHeap(), GetThreadId());
- ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
+ RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
mapper.WalkStack();
}
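
The net effect for callers is that a visitor is now a small object instead of a function pointer plus a void* cookie. A hypothetical caller of the new Thread::VisitRoots (CountRootsVisitor is illustrative, not part of this patch):

    class CountRootsVisitor : public SingleRootVisitor {
     public:
      void VisitRoot(mirror::Object* root ATTRIBUTE_UNUSED, const RootInfo& info ATTRIBUTE_UNUSED)
          OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        ++count_;
      }
      size_t count_ = 0u;
    };

    // With the mutator lock held:
    CountRootsVisitor counter;
    thread->VisitRoots(&counter);  // previously: thread->VisitRoots(&SomeCallback, &some_state)
    LOG(INFO) << "thread roots visited: " << counter.count_;
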
diff --git a/runtime/thread.h b/runtime/thread.h
index 9d4d89d..f89e46b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -485,7 +485,7 @@ class Thread {
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -686,7 +686,7 @@ class Thread {
// Is the given obj in this thread's stack indirect reference table?
bool HandleScopeContains(jobject obj) const;
- void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
+ void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
HandleScope* GetTopHandleScope() {
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 1ab0093..560bcc1 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1156,10 +1156,10 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
}
}
-void ThreadList::VisitRoots(RootCallback* callback, void* arg) const {
+void ThreadList::VisitRoots(RootVisitor* visitor) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
- thread->VisitRoots(callback, arg);
+ thread->VisitRoots(visitor);
}
}
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index c18e285..fa747b8 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -136,7 +136,7 @@ class ThreadList {
LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_);
- void VisitRoots(RootCallback* callback, void* arg) const
+ void VisitRoots(RootVisitor* visitor) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Return a copy of the thread list.
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ea0a642..5375dc0 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -243,8 +243,7 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
-static void ClearThreadStackTraceAndClockBase(Thread* thread ATTRIBUTE_UNUSED,
- void* arg ATTRIBUTE_UNUSED) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(NULL);
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 9b205c3..cc0f15f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -221,24 +221,24 @@ void Transaction::UndoInternStringTableModifications() {
intern_string_logs_.clear();
}
-void Transaction::VisitRoots(RootCallback* callback, void* arg) {
+void Transaction::VisitRoots(RootVisitor* visitor) {
MutexLock mu(Thread::Current(), log_lock_);
- VisitObjectLogs(callback, arg);
- VisitArrayLogs(callback, arg);
- VisitStringLogs(callback, arg);
+ VisitObjectLogs(visitor);
+ VisitArrayLogs(visitor);
+ VisitStringLogs(visitor);
}
-void Transaction::VisitObjectLogs(RootCallback* callback, void* arg) {
+void Transaction::VisitObjectLogs(RootVisitor* visitor) {
// List of moving roots.
typedef std::pair<mirror::Object*, mirror::Object*> ObjectPair;
std::list<ObjectPair> moving_roots;
// Visit roots.
for (auto it : object_logs_) {
- it.second.VisitRoots(callback, arg);
+ it.second.VisitRoots(visitor);
mirror::Object* old_root = it.first;
mirror::Object* new_root = old_root;
- callback(&new_root, arg, RootInfo(kRootUnknown));
+ visitor->VisitRoot(&new_root, RootInfo(kRootUnknown));
if (new_root != old_root) {
moving_roots.push_back(std::make_pair(old_root, new_root));
}
@@ -256,7 +256,7 @@ void Transaction::VisitObjectLogs(RootCallback* callback, void* arg) {
}
}
-void Transaction::VisitArrayLogs(RootCallback* callback, void* arg) {
+void Transaction::VisitArrayLogs(RootVisitor* visitor) {
// List of moving roots.
typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
std::list<ArrayPair> moving_roots;
@@ -265,7 +265,7 @@ void Transaction::VisitArrayLogs(RootCallback* callback, void* arg) {
mirror::Array* old_root = it.first;
CHECK(!old_root->IsObjectArray());
mirror::Array* new_root = old_root;
- callback(reinterpret_cast<mirror::Object**>(&new_root), arg, RootInfo(kRootUnknown));
+ visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
if (new_root != old_root) {
moving_roots.push_back(std::make_pair(old_root, new_root));
}
@@ -283,9 +283,9 @@ void Transaction::VisitArrayLogs(RootCallback* callback, void* arg) {
}
}
-void Transaction::VisitStringLogs(RootCallback* callback, void* arg) {
+void Transaction::VisitStringLogs(RootVisitor* visitor) {
for (InternStringLog& log : intern_string_logs_) {
- log.VisitRoots(callback, arg);
+ log.VisitRoots(visitor);
}
}
@@ -421,16 +421,12 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
}
}
-void Transaction::ObjectLog::VisitRoots(RootCallback* callback, void* arg) {
+void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
for (auto it : field_values_) {
FieldValue& field_value = it.second;
if (field_value.kind == ObjectLog::kReference) {
- mirror::Object* obj =
- reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(field_value.value));
- if (obj != nullptr) {
- callback(&obj, arg, RootInfo(kRootUnknown));
- field_value.value = reinterpret_cast<uintptr_t>(obj);
- }
+ visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
+ RootInfo(kRootUnknown));
}
}
}
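
Handing the visitor the address of field_value.value (reinterpreted as a mirror::Object**) lets a moving collector rewrite the logged reference in place, which is what the old copy-out, call, compare and copy-back sequence emulated. Hypothetically, a moving visitor does something like this per slot (Forward() is a stand-in, not a real ART function):

    void VisitRootSketch(mirror::Object** root, const RootInfo& info ATTRIBUTE_UNUSED) {
      mirror::Object* ref = *root;
      if (ref != nullptr) {
        *root = Forward(ref);  // write the to-space address straight back into the ObjectLog
      }
    }
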
@@ -472,8 +468,8 @@ void Transaction::InternStringLog::Undo(InternTable* intern_table) {
}
}
-void Transaction::InternStringLog::VisitRoots(RootCallback* callback, void* arg) {
- callback(reinterpret_cast<mirror::Object**>(&str_), arg, RootInfo(kRootInternedString));
+void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
+ visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&str_), RootInfo(kRootInternedString));
}
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 1419a38..4d85662 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -100,7 +100,7 @@ class Transaction FINAL {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(log_lock_);
- void VisitRoots(RootCallback* callback, void* arg)
+ void VisitRoots(RootVisitor* visitor)
LOCKS_EXCLUDED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -116,7 +116,7 @@ class Transaction FINAL {
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* callback, void* arg);
+ void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
@@ -184,7 +184,7 @@ class Transaction FINAL {
void Undo(InternTable* intern_table)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
- void VisitRoots(RootCallback* callback, void* arg);
+ void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
mirror::String* str_;
@@ -207,13 +207,13 @@ class Transaction FINAL {
EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitObjectLogs(RootCallback* callback, void* arg)
+ void VisitObjectLogs(RootVisitor* visitor)
EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitArrayLogs(RootCallback* callback, void* arg)
+ void VisitArrayLogs(RootVisitor* visitor)
EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitStringLogs(RootCallback* callback, void* arg)
+ void VisitStringLogs(RootVisitor* visitor)
EXCLUSIVE_LOCKS_REQUIRED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1d04192..d0f8468 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1075,7 +1075,6 @@ bool MethodVerifier::GetBranchOffset(uint32_t cur_offset, int32_t* pOffset, bool
break;
default:
return false;
- break;
}
return true;
}
@@ -4351,12 +4350,12 @@ void MethodVerifier::Shutdown() {
verifier::RegTypeCache::ShutDown();
}
-void MethodVerifier::VisitStaticRoots(RootCallback* callback, void* arg) {
- RegTypeCache::VisitStaticRoots(callback, arg);
+void MethodVerifier::VisitStaticRoots(RootVisitor* visitor) {
+ RegTypeCache::VisitStaticRoots(visitor);
}
-void MethodVerifier::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
- reg_types_.VisitRoots(callback, arg, root_info);
+void MethodVerifier::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
+ reg_types_.VisitRoots(visitor, root_info);
}
} // namespace verifier
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 6b813ef..c813634 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -225,9 +225,9 @@ class MethodVerifier {
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
- static void VisitStaticRoots(RootCallback* callback, void* arg)
+ static void VisitStaticRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& roots)
+ void VisitRoots(RootVisitor* visitor, const RootInfo& roots)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessors used by the compiler via CompilerCallback
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 97d0cbe..c8aa4fd 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -778,8 +778,8 @@ void RegType::CheckInvariants() const {
}
}
-void RegType::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) const {
- klass_.VisitRootIfNonNull(callback, arg, root_info);
+void RegType::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const {
+ klass_.VisitRootIfNonNull(visitor, root_info);
}
void UninitializedThisReferenceType::CheckInvariants() const {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index d260650..e4d2c3e 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -262,7 +262,7 @@ class RegType {
virtual ~RegType() {}
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) const
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 6e57857..b371d7e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -557,33 +557,33 @@ void RegTypeCache::Dump(std::ostream& os) {
}
}
-void RegTypeCache::VisitStaticRoots(RootCallback* callback, void* arg) {
+void RegTypeCache::VisitStaticRoots(RootVisitor* visitor) {
// Visit the primitive types; this is required since, if there are no active verifiers, they won't
// be in the entries array and therefore won't be visited as roots.
if (primitive_initialized_) {
RootInfo ri(kRootUnknown);
- UndefinedType::GetInstance()->VisitRoots(callback, arg, ri);
- ConflictType::GetInstance()->VisitRoots(callback, arg, ri);
- BooleanType::GetInstance()->VisitRoots(callback, arg, ri);
- ByteType::GetInstance()->VisitRoots(callback, arg, ri);
- ShortType::GetInstance()->VisitRoots(callback, arg, ri);
- CharType::GetInstance()->VisitRoots(callback, arg, ri);
- IntegerType::GetInstance()->VisitRoots(callback, arg, ri);
- LongLoType::GetInstance()->VisitRoots(callback, arg, ri);
- LongHiType::GetInstance()->VisitRoots(callback, arg, ri);
- FloatType::GetInstance()->VisitRoots(callback, arg, ri);
- DoubleLoType::GetInstance()->VisitRoots(callback, arg, ri);
- DoubleHiType::GetInstance()->VisitRoots(callback, arg, ri);
+ UndefinedType::GetInstance()->VisitRoots(visitor, ri);
+ ConflictType::GetInstance()->VisitRoots(visitor, ri);
+ BooleanType::GetInstance()->VisitRoots(visitor, ri);
+ ByteType::GetInstance()->VisitRoots(visitor, ri);
+ ShortType::GetInstance()->VisitRoots(visitor, ri);
+ CharType::GetInstance()->VisitRoots(visitor, ri);
+ IntegerType::GetInstance()->VisitRoots(visitor, ri);
+ LongLoType::GetInstance()->VisitRoots(visitor, ri);
+ LongHiType::GetInstance()->VisitRoots(visitor, ri);
+ FloatType::GetInstance()->VisitRoots(visitor, ri);
+ DoubleLoType::GetInstance()->VisitRoots(visitor, ri);
+ DoubleHiType::GetInstance()->VisitRoots(visitor, ri);
for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
- small_precise_constants_[value - kMinSmallConstant]->VisitRoots(callback, arg, ri);
+ small_precise_constants_[value - kMinSmallConstant]->VisitRoots(visitor, ri);
}
}
}
-void RegTypeCache::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
+void RegTypeCache::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
// Exclude the static roots that are visited by VisitStaticRoots().
for (size_t i = primitive_count_; i < entries_.size(); ++i) {
- entries_[i]->VisitRoots(callback, arg, root_info);
+ entries_[i]->VisitRoots(visitor, root_info);
}
}
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 01032a0..4b3105c 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -137,9 +137,9 @@ class RegTypeCache {
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
- void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
+ void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void VisitStaticRoots(RootCallback* callback, void* arg)
+ static void VisitStaticRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index 76779ab..70a4f71 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -28,6 +28,11 @@
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+// We cannot annotate the declarations, as they are not no-return in the non-dummy version.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+
static void log(const char* format, ...) {
char buf[256];
va_list ap;
@@ -73,4 +78,6 @@ extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED,
abort();
}
+#pragma GCC diagnostic pop
+
} // namespace art