Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk | 1
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h | 6
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 63
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 2
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h | 6
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 9
-rw-r--r--  runtime/arch/x86_64/registers_x86_64.h | 2
-rw-r--r--  runtime/base/logging.h | 22
-rw-r--r--  runtime/base/mutex.cc | 4
-rw-r--r--  runtime/base/mutex.h | 5
-rw-r--r--  runtime/base/timing_logger.cc | 38
-rw-r--r--  runtime/base/timing_logger.h | 13
-rw-r--r--  runtime/class_linker.cc | 22
-rw-r--r--  runtime/common_throws.cc | 6
-rw-r--r--  runtime/common_throws.h | 5
-rw-r--r--  runtime/debugger.cc | 385
-rw-r--r--  runtime/debugger.h | 73
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 2
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h | 51
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h | 60
-rw-r--r--  runtime/gc/accounting/mod_union_table-inl.h | 37
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 85
-rw-r--r--  runtime/gc/accounting/mod_union_table.h | 8
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 36
-rw-r--r--  runtime/gc/accounting/remembered_set.h | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 24
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 4
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 25
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 56
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 127
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 29
-rw-r--r--  runtime/gc/collector/immune_region.cc | 12
-rw-r--r--  runtime/gc/collector/immune_region.h | 17
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h | 125
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 524
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 154
-rw-r--r--  runtime/gc/collector/semi_space-inl.h | 59
-rw-r--r--  runtime/gc/collector/semi_space.cc | 189
-rw-r--r--  runtime/gc/collector/semi_space.h | 149
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 4
-rw-r--r--  runtime/gc/collector_type.h | 2
-rw-r--r--  runtime/gc/heap-inl.h | 6
-rw-r--r--  runtime/gc/heap.cc | 261
-rw-r--r--  runtime/gc/heap.h | 36
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 20
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 3
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 2
-rw-r--r--  runtime/gc/space/large_object_space.cc | 50
-rw-r--r--  runtime/gc/space/large_object_space.h | 8
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 8
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 1
-rw-r--r--  runtime/instrumentation.cc | 78
-rw-r--r--  runtime/instrumentation.h | 39
-rw-r--r--  runtime/intern_table.cc | 8
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 5
-rw-r--r--  runtime/jdwp/jdwp.h | 4
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 41
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 1
-rw-r--r--  runtime/jni_internal.cc | 20
-rw-r--r--  runtime/mem_map.cc | 42
-rw-r--r--  runtime/mirror/array-inl.h | 95
-rw-r--r--  runtime/mirror/art_field.cc | 9
-rw-r--r--  runtime/mirror/art_field.h | 4
-rw-r--r--  runtime/mirror/art_method.cc | 12
-rw-r--r--  runtime/mirror/art_method.h | 4
-rw-r--r--  runtime/mirror/class-inl.h | 6
-rw-r--r--  runtime/mirror/class.h | 4
-rw-r--r--  runtime/mirror/object-inl.h | 71
-rw-r--r--  runtime/mirror/object.h | 19
-rw-r--r--  runtime/mirror/object_array-inl.h | 11
-rw-r--r--  runtime/mirror/object_array.h | 5
-rw-r--r--  runtime/mirror/string.cc | 4
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc | 12
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc | 381
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc | 13
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 38
-rw-r--r--  runtime/object_callbacks.h | 4
-rw-r--r--  runtime/offsets.h | 4
-rw-r--r--  runtime/parsed_options.cc | 6
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 87
-rw-r--r--  runtime/reflection.cc | 143
-rw-r--r--  runtime/reflection.h | 16
-rw-r--r--  runtime/runtime.cc | 14
-rw-r--r--  runtime/runtime.h | 16
-rw-r--r--  runtime/thread.cc | 16
-rw-r--r--  runtime/thread.h | 7
-rw-r--r--  runtime/verifier/method_verifier.cc | 15
-rw-r--r--  runtime/verifier/reg_type_test.cc | 6
-rw-r--r--  runtime/verify_object-inl.h | 5
89 files changed, 2374 insertions, 1729 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1ca8e07..1576905 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -52,6 +52,7 @@ LIBART_COMMON_SRC_FILES := \
gc/accounting/mod_union_table.cc \
gc/accounting/remembered_set.cc \
gc/accounting/space_bitmap.cc \
+ gc/collector/concurrent_copying.cc \
gc/collector/garbage_collector.cc \
gc/collector/immune_region.cc \
gc/collector/mark_sweep.cc \
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 44c3e60..263a764 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -22,11 +22,11 @@
// TODO Thread offsets need to be checked when on Aarch64.
// Offset of field Runtime::callee_save_methods_[kSaveAll]
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 320
+#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 328
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 336
+#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
// Register holding Thread::Current().
#define xSELF x18
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2d64e7f..3082273 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -197,6 +197,33 @@
.cfi_adjust_cfa_offset -304
.endm
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0
+
+ ldr d1, [sp, #24]
+ ldp d2, d3, [sp, #32]
+ ldp d4, d5, [sp, #48]
+ ldp d6, d7, [sp, #64]
+ ldp d8, d9, [sp, #80]
+ ldp d10, d11, [sp, #96]
+ ldp d12, d13, [sp, #112]
+ ldp d14, d15, [sp, #128]
+
+ // args.
+ ldp x1, x2, [sp, #144]
+ ldp x3, x4, [sp, #160]
+ ldp x5, x6, [sp, #176]
+ ldp x7, xSELF, [sp, #192]
+ ldp x19, x20, [sp, #208]
+ ldp x21, x22, [sp, #224]
+ ldp x23, x24, [sp, #240]
+ ldp x25, x26, [sp, #256]
+ ldp x27, x28, [sp, #272]
+ ldp xFP, xLR, [sp, #288]
+
+ add sp, sp, #304
+ .cfi_adjust_cfa_offset -304
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
brk 0
.endm
@@ -453,7 +480,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
cmp w17, #'J' // is this a long?
bne .LisOther
- cmp x8, # 7*12 // Skip this load if all registers full.
+ cmp x8, # 6*12 // Skip this load if all registers full.
beq .LfillRegisters
add x17, x12, x8 // Calculate subroutine to jump to.
@@ -461,7 +488,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
.LisOther: // Everything else takes one vReg.
- cmp x8, # 7*12 // Skip this load if all registers full.
+ cmp x8, # 6*12 // Skip this load if all registers full.
beq .LfillRegisters
add x17, x11, x8 // Calculate subroutine to jump to.
br x17
@@ -876,24 +903,42 @@ GENERATE_ALL_ALLOC_ENTRYPOINTS
UNIMPLEMENTED art_quick_test_suspend
-/**
- * Returned by ClassLinker::GetOatCodeFor
- *
- */
-UNIMPLEMENTED art_quick_proxy_invoke_handler
+ /*
+ * Called by managed code that is attempting to call a method on a proxy class. On entry
+ * x0 holds the proxy method and x1 holds the receiver; the frame size of the invoked proxy
+ * method agrees with a ref and args callee save frame.
+ */
+ .extern artQuickProxyInvokeHandler
+ENTRY art_quick_proxy_invoke_handler
+ SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ str x0, [sp, #0] // place proxy method at bottom of frame
+ mov x2, xSELF // pass Thread::Current
+ mov x3, sp // pass SP
+ bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
+ ldr xSELF, [sp, #200] // Restore self pointer.
+ ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
+ cbnz x2, .Lexception_in_proxy // success if no exception is pending
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME_NO_D0 // keep d0
+ ret // return on success
+.Lexception_in_proxy:
+ RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ DELIVER_PENDING_EXCEPTION
+END art_quick_proxy_invoke_handler
UNIMPLEMENTED art_quick_imt_conflict_trampoline
ENTRY art_quick_resolution_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ mov x19, x0 // save the called method
mov x2, xSELF
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
- mov x9, x0 // Remember returned code pointer in x9.
+ mov x9, x0 // Remember returned code pointer in x9.
+ mov x0, x19 // Restore the method, before x19 is restored to on-call value
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
cbz x9, 1f
- br x0
+ br x9
1:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
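For orientation, the C++ entry point this stub branches to takes its arguments in the order given in the "(Method* proxy method, receiver, Thread*, SP)" comment. A hedged sketch of the declaration follows; the return type and the type of the SP parameter are assumptions, not confirmed by this diff:

    // Sketch only: argument order taken from the stub's comment above; return/SP types assumed.
    extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,  // x0
                                                   mirror::Object* receiver,         // x1
                                                   Thread* self,                     // x2
                                                   mirror::ArtMethod** sp);          // x3
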
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index ca2489c..d03a474 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -29,7 +29,7 @@
// Clang's as(1) uses $0, $1, and so on for macro arguments.
#define VAR(name,index) SYMBOL($index)
- #define PLT_VAR(name, index) SYMBOL($index)
+ #define PLT_VAR(name, index) SYMBOL($index)@PLT
#define REG_VAR(name,index) %$index
#define CALL_MACRO(name,index) $index
#define FUNCTION_TYPE(name,index) .type $index, @function
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 5a4e63e..03d9e24 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -20,11 +20,11 @@
#include "asm_support.h"
// Offset of field Runtime::callee_save_methods_[kSaveAll]
-#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 200
+#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
// Offset of field Runtime::callee_save_methods_[kRefsOnly]
-#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 208
+#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
-#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 216
+#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
// Offset of field Thread::self_ verified in InitCpu
#define THREAD_SELF_OFFSET 72
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5fbf8cb..0d75a89 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -142,8 +142,13 @@ END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ // Outgoing argument set up
+ mov %rsp, %rdx // pass SP
+ mov %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ mov %rax, %rdi // pass arg1
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ int3 // unreached
END_FUNCTION VAR(c_name, 0)
END_MACRO
diff --git a/runtime/arch/x86_64/registers_x86_64.h b/runtime/arch/x86_64/registers_x86_64.h
index 8b0dc07..b9d06b5 100644
--- a/runtime/arch/x86_64/registers_x86_64.h
+++ b/runtime/arch/x86_64/registers_x86_64.h
@@ -67,7 +67,7 @@ enum FloatRegister {
XMM15 = 15,
kNumberOfFloatRegisters = 16
};
-std::ostream& operator<<(std::ostream& os, const FloatRegister& rhs);
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
} // namespace x86_64
} // namespace art
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 0fcec1f..fcec733 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -235,6 +235,28 @@ std::ostream& operator<<(std::ostream& os, const Dumpable<T>& rhs) {
return os;
}
+template<typename T>
+class ConstDumpable {
+ public:
+ explicit ConstDumpable(const T& value) : value_(value) {
+ }
+
+ void Dump(std::ostream& os) const {
+ value_.Dump(os);
+ }
+
+ private:
+ const T& value_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstDumpable);
+};
+
+template<typename T>
+std::ostream& operator<<(std::ostream& os, const ConstDumpable<T>& rhs) {
+ rhs.Dump(os);
+ return os;
+}
+
// Helps you use operator<< in a const char*-like context such as our various 'F' methods with
// format strings.
template<typename T>
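The new ConstDumpable wrapper mirrors the existing Dumpable but only requires a const Dump(std::ostream&), which is what CumulativeLogger::Dump provides once it is made const later in this change. A minimal standalone sketch of the intended usage (the GcStats type here is illustrative, not part of the change):

    #include <iostream>
    // Assumes "base/logging.h" is included for ConstDumpable and its operator<<.
    struct GcStats {
      void Dump(std::ostream& os) const { os << "pauses: 3\n"; }
    };

    int main() {
      const GcStats stats;                         // const object: ConstDumpable only needs a const Dump()
      std::cout << ConstDumpable<GcStats>(stats);  // streams via the const Dump() overload
      return 0;
    }
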
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 532e6c4..fdd0249 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -31,7 +31,6 @@ namespace art {
Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
-Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -816,7 +815,6 @@ void Locks::Init() {
// Already initialized.
DCHECK(abort_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
- DCHECK(deoptimization_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -833,8 +831,6 @@ void Locks::Init() {
DCHECK(breakpoint_lock_ == nullptr);
breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
- DCHECK(deoptimization_lock_ == nullptr);
- deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
DCHECK(classlinker_classes_lock_ == nullptr);
classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
kClassLinkerClassesLock);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 55ec1c3..4b881f6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -537,11 +537,8 @@ class Locks {
// Guards breakpoints.
static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
- // Guards deoptimization requests.
- static Mutex* deoptimization_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
// Guards trace requests.
- static Mutex* trace_lock_ ACQUIRED_AFTER(deoptimization_lock_);
+ static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
// Guards profile objects.
static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index fe18f66..a155002 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -54,38 +54,24 @@ void CumulativeLogger::Start() {
void CumulativeLogger::End() {
MutexLock mu(Thread::Current(), lock_);
- iterations_++;
+ ++iterations_;
}
void CumulativeLogger::Reset() {
MutexLock mu(Thread::Current(), lock_);
iterations_ = 0;
+ total_time_ = 0;
STLDeleteElements(&histograms_);
}
-uint64_t CumulativeLogger::GetTotalNs() const {
- MutexLock mu(Thread::Current(), lock_);
- return GetTotalTime() * kAdjust;
-}
-
-uint64_t CumulativeLogger::GetTotalTime() const {
- MutexLock mu(Thread::Current(), lock_);
- uint64_t total = 0;
- for (Histogram<uint64_t>* histogram : histograms_) {
- total += histogram->Sum();
- }
- return total;
-}
-
void CumulativeLogger::AddLogger(const TimingLogger &logger) {
MutexLock mu(Thread::Current(), lock_);
- const TimingLogger::SplitTimings& splits = logger.GetSplits();
- for (auto it = splits.begin(), end = splits.end(); it != end; ++it) {
- TimingLogger::SplitTiming split = *it;
+ for (const TimingLogger::SplitTiming& split : logger.GetSplits()) {
uint64_t split_time = split.first;
const char* split_name = split.second;
AddPair(split_name, split_time);
}
+ ++iterations_;
}
size_t CumulativeLogger::GetIterations() const {
@@ -93,7 +79,7 @@ size_t CumulativeLogger::GetIterations() const {
return iterations_;
}
-void CumulativeLogger::Dump(std::ostream &os) {
+void CumulativeLogger::Dump(std::ostream &os) const {
MutexLock mu(Thread::Current(), lock_);
DumpHistogram(os);
}
@@ -101,7 +87,7 @@ void CumulativeLogger::Dump(std::ostream &os) {
void CumulativeLogger::AddPair(const std::string& label, uint64_t delta_time) {
// Convert delta time to microseconds so that we don't overflow our counters.
delta_time /= kAdjust;
-
+ total_time_ += delta_time;
Histogram<uint64_t>* histogram;
Histogram<uint64_t> dummy(label.c_str());
auto it = histograms_.find(&dummy);
@@ -123,7 +109,7 @@ class CompareHistorgramByTimeSpentDeclining {
}
};
-void CumulativeLogger::DumpHistogram(std::ostream &os) {
+void CumulativeLogger::DumpHistogram(std::ostream &os) const {
os << "Start Dumping histograms for " << iterations_ << " iterations"
<< " for " << name_ << "\n";
std::set<Histogram<uint64_t>*, CompareHistorgramByTimeSpentDeclining>
@@ -174,8 +160,7 @@ void TimingLogger::NewSplit(const char* new_split_label) {
uint64_t TimingLogger::GetTotalNs() const {
uint64_t total_ns = 0;
- for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
- TimingLogger::SplitTiming split = *it;
+ for (const TimingLogger::SplitTiming& split : splits_) {
total_ns += split.first;
}
return total_ns;
@@ -184,8 +169,7 @@ uint64_t TimingLogger::GetTotalNs() const {
void TimingLogger::Dump(std::ostream &os) const {
uint64_t longest_split = 0;
uint64_t total_ns = 0;
- for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
- TimingLogger::SplitTiming split = *it;
+ for (const SplitTiming& split : splits_) {
uint64_t split_time = split.first;
longest_split = std::max(longest_split, split_time);
total_ns += split_time;
@@ -194,8 +178,7 @@ void TimingLogger::Dump(std::ostream &os) const {
TimeUnit tu = GetAppropriateTimeUnit(longest_split);
uint64_t divisor = GetNsToTimeUnitDivisor(tu);
// Print formatted splits.
- for (auto it = splits_.begin(), end = splits_.end(); it != end; ++it) {
- const TimingLogger::SplitTiming& split = *it;
+ for (const SplitTiming& split : splits_) {
uint64_t split_time = split.first;
if (!precise_ && divisor >= 1000) {
// Make the fractional part 0.
@@ -207,7 +190,6 @@ void TimingLogger::Dump(std::ostream &os) const {
os << name_ << ": end, " << NsToMs(total_ns) << " ms\n";
}
-
TimingLogger::ScopedSplit::ScopedSplit(const char* label, TimingLogger* timing_logger) {
DCHECK(label != NULL) << "New scoped split (" << label << ") with null label.";
CHECK(timing_logger != NULL) << "New scoped split (" << label << ") without TimingLogger.";
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index b0bcf10..9b55898 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -35,8 +35,10 @@ class CumulativeLogger {
void Start();
void End() LOCKS_EXCLUDED(lock_);
void Reset() LOCKS_EXCLUDED(lock_);
- void Dump(std::ostream& os) LOCKS_EXCLUDED(lock_);
- uint64_t GetTotalNs() const;
+ void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_);
+ uint64_t GetTotalNs() const {
+ return GetTotalTime() * kAdjust;
+ }
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
void SetName(const std::string& name) LOCKS_EXCLUDED(lock_);
@@ -57,14 +59,17 @@ class CumulativeLogger {
void AddPair(const std::string &label, uint64_t delta_time)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_);
- uint64_t GetTotalTime() const;
+ void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ uint64_t GetTotalTime() const {
+ return total_time_;
+ }
static const uint64_t kAdjust = 1000;
std::set<Histogram<uint64_t>*, HistogramComparator> histograms_ GUARDED_BY(lock_);
std::string name_;
const std::string lock_name_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
size_t iterations_ GUARDED_BY(lock_);
+ uint64_t total_time_;
DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
};
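With total_time_ now maintained incrementally by AddPair(), GetTotalNs() no longer has to walk every histogram under the lock: it just returns the cached microsecond total scaled back up. A worked example with kAdjust = 1000 as declared above: an AddPair() call with delta_time = 2,345,678 ns stores 2,345,678 / 1000 = 2,345 us into total_time_ (the sub-microsecond remainder is dropped by integer division), and GetTotalNs() then reports 2,345 * 1000 = 2,345,000 ns.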
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cef9954..08ea123 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2314,26 +2314,26 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
}
mirror::Class* ClassLinker::FindPrimitiveClass(char type) {
- switch (Primitive::GetType(type)) {
- case Primitive::kPrimByte:
+ switch (type) {
+ case 'B':
return GetClassRoot(kPrimitiveByte);
- case Primitive::kPrimChar:
+ case 'C':
return GetClassRoot(kPrimitiveChar);
- case Primitive::kPrimDouble:
+ case 'D':
return GetClassRoot(kPrimitiveDouble);
- case Primitive::kPrimFloat:
+ case 'F':
return GetClassRoot(kPrimitiveFloat);
- case Primitive::kPrimInt:
+ case 'I':
return GetClassRoot(kPrimitiveInt);
- case Primitive::kPrimLong:
+ case 'J':
return GetClassRoot(kPrimitiveLong);
- case Primitive::kPrimShort:
+ case 'S':
return GetClassRoot(kPrimitiveShort);
- case Primitive::kPrimBoolean:
+ case 'Z':
return GetClassRoot(kPrimitiveBoolean);
- case Primitive::kPrimVoid:
+ case 'V':
return GetClassRoot(kPrimitiveVoid);
- case Primitive::kPrimNot:
+ default:
break;
}
std::string printable_type(PrintableChar(type));
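Switching directly on the descriptor character avoids the round trip through Primitive::GetType() and lets any non-primitive character fall through to the error path below. An illustrative use of the lookup (the class_linker pointer is assumed to be in scope):

    mirror::Class* int_class  = class_linker->FindPrimitiveClass('I');  // class root for int
    mirror::Class* void_class = class_linker->FindPrimitiveClass('V');  // class root for void
    // Any other character (e.g. 'L' or '[') reaches the default case and the error reporting below.
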
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 24d16c4..4b6d82b 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -175,6 +175,12 @@ void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
va_end(args);
}
+// IllegalAccessException
+
+void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg) {
+ ThrowException(throw_location, "Ljava/lang/IllegalAccessException;", NULL, msg);
+}
+
// IllegalArgumentException
void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg) {
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 792cdef..c06763e 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -92,6 +92,11 @@ void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+// IllegalAccessException
+
+void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
+
// IllegalArgumentException
void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg)
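An illustrative call site for the new helper (the surrounding names are assumptions for this sketch, not taken from this diff):

    // Sketch only: 'self' is the current Thread* and 'field' an ArtField*; both are assumed here.
    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
    ThrowIllegalAccessException(&throw_location,
        StringPrintf("Cannot access field: %s", PrettyField(field).c_str()).c_str());
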
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c702229..9af9c7a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -38,6 +38,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
+#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
@@ -48,6 +49,7 @@
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
+#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"
#ifdef HAVE_ANDROID_OS
@@ -101,9 +103,21 @@ struct AllocRecord {
};
struct Breakpoint {
+ // The location of this breakpoint.
mirror::ArtMethod* method;
uint32_t dex_pc;
- Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {}
+
+ // Indicates whether the breakpoint needs full deoptimization or selective deoptimization.
+ bool need_full_deoptimization;
+
+ Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+ : method(method), dex_pc(dex_pc), need_full_deoptimization(need_full_deoptimization) {}
+
+ void VisitRoots(RootCallback* callback, void* arg) {
+ if (method != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
+ }
+ }
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
@@ -194,21 +208,42 @@ size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
// Deoptimization support.
-struct MethodInstrumentationRequest {
- bool deoptimize;
-
- // Method for selective deoptimization. NULL means full deoptimization.
- mirror::ArtMethod* method;
-
- MethodInstrumentationRequest(bool deoptimize, mirror::ArtMethod* method)
- : deoptimize(deoptimize), method(method) {}
-};
-// TODO we need to visit associated methods as roots.
-static std::vector<MethodInstrumentationRequest> gDeoptimizationRequests GUARDED_BY(Locks::deoptimization_lock_);
+Mutex* Dbg::deoptimization_lock_ = nullptr;
+std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
+size_t Dbg::full_deoptimization_event_count_ = 0;
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
+void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
+ RootType root_type) {
+ if (receiver != nullptr) {
+ callback(&receiver, arg, tid, root_type);
+ }
+ if (thread != nullptr) {
+ callback(&thread, arg, tid, root_type);
+ }
+ if (klass != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
+ }
+ if (method != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
+ }
+}
+
+void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
+ RootType root_type) {
+ if (method != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
+ }
+}
+
+void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
+ if (method != nullptr) {
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
+ }
+}
+
static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -473,6 +508,7 @@ void Dbg::StartJdwp() {
gRegistry = new ObjectRegistry;
alloc_tracker_lock_ = new Mutex("AllocTracker lock");
+ deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
// Init JDWP if the debugger is enabled. This may connect out to a
// debugger, passively listen for a debugger, or block waiting for a
// debugger.
@@ -494,6 +530,21 @@ void Dbg::StartJdwp() {
}
}
+void Dbg::VisitRoots(RootCallback* callback, void* arg) {
+ {
+ MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ for (Breakpoint& bp : gBreakpoints) {
+ bp.VisitRoots(callback, arg);
+ }
+ }
+ if (deoptimization_lock_ != nullptr) { // only true if the debugger is started.
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ for (DeoptimizationRequest& req : deoptimization_requests_) {
+ req.VisitRoots(callback, arg);
+ }
+ }
+}
+
void Dbg::StopJdwp() {
// Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
Disposed();
@@ -503,6 +554,8 @@ void Dbg::StopJdwp() {
gRegistry = nullptr;
delete alloc_tracker_lock_;
alloc_tracker_lock_ = nullptr;
+ delete deoptimization_lock_;
+ deoptimization_lock_ = nullptr;
}
void Dbg::GcDidFinish() {
@@ -569,8 +622,9 @@ void Dbg::GoActive() {
}
{
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- CHECK_EQ(gDeoptimizationRequests.size(), 0U);
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ CHECK_EQ(deoptimization_requests_.size(), 0U);
+ CHECK_EQ(full_deoptimization_event_count_, 0U);
}
Runtime* runtime = Runtime::Current();
@@ -610,8 +664,9 @@ void Dbg::Disconnected() {
// Since we're going to disable deoptimization, we clear the deoptimization requests queue.
// This prevents us from having any pending deoptimization request when the debugger attaches
// to us again while no event has been requested yet.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.clear();
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ deoptimization_requests_.clear();
+ full_deoptimization_event_count_ = 0U;
}
runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
instrumentation::Instrumentation::kMethodEntered |
@@ -2510,44 +2565,86 @@ void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
}
}
-static void ProcessDeoptimizationRequests()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+// Process request while all mutator threads are suspended.
+void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- for (const MethodInstrumentationRequest& request : gDeoptimizationRequests) {
- mirror::ArtMethod* const method = request.method;
- if (method != nullptr) {
- // Selective deoptimization.
- if (request.deoptimize) {
- VLOG(jdwp) << "Deoptimize method " << PrettyMethod(method);
- instrumentation->Deoptimize(method);
- } else {
- VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(method);
- instrumentation->Undeoptimize(method);
+ switch (request.kind) {
+ case DeoptimizationRequest::kNothing:
+ LOG(WARNING) << "Ignoring empty deoptimization request.";
+ break;
+ case DeoptimizationRequest::kFullDeoptimization:
+ VLOG(jdwp) << "Deoptimize the world";
+ instrumentation->DeoptimizeEverything();
+ break;
+ case DeoptimizationRequest::kFullUndeoptimization:
+ VLOG(jdwp) << "Undeoptimize the world";
+ instrumentation->UndeoptimizeEverything();
+ break;
+ case DeoptimizationRequest::kSelectiveDeoptimization:
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method);
+ instrumentation->Deoptimize(request.method);
+ break;
+ case DeoptimizationRequest::kSelectiveUndeoptimization:
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method);
+ instrumentation->Undeoptimize(request.method);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
+ break;
+ }
+}
+
+void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
+ if (req.kind == DeoptimizationRequest::kNothing) {
+ // Nothing to do.
+ return;
+ }
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ switch (req.kind) {
+ case DeoptimizationRequest::kFullDeoptimization: {
+ DCHECK(req.method == nullptr);
+ if (full_deoptimization_event_count_ == 0) {
+ VLOG(jdwp) << "Request full deoptimization";
+ deoptimization_requests_.push_back(req);
}
- } else {
- // Full deoptimization.
- if (request.deoptimize) {
- VLOG(jdwp) << "Deoptimize the world";
- instrumentation->DeoptimizeEverything();
- } else {
- VLOG(jdwp) << "Undeoptimize the world";
- instrumentation->UndeoptimizeEverything();
+ ++full_deoptimization_event_count_;
+ break;
+ }
+ case DeoptimizationRequest::kFullUndeoptimization: {
+ DCHECK(req.method == nullptr);
+ DCHECK_GT(full_deoptimization_event_count_, 0U);
+ --full_deoptimization_event_count_;
+ if (full_deoptimization_event_count_ == 0) {
+ VLOG(jdwp) << "Request full undeoptimization";
+ deoptimization_requests_.push_back(req);
}
+ break;
+ }
+ case DeoptimizationRequest::kSelectiveDeoptimization: {
+ DCHECK(req.method != nullptr);
+ VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(req.method);
+ deoptimization_requests_.push_back(req);
+ break;
+ }
+ case DeoptimizationRequest::kSelectiveUndeoptimization: {
+ DCHECK(req.method != nullptr);
+ VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(req.method);
+ deoptimization_requests_.push_back(req);
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
+ break;
}
}
- gDeoptimizationRequests.clear();
}
-// Process deoptimization requests after suspending all mutator threads.
void Dbg::ManageDeoptimization() {
Thread* const self = Thread::Current();
{
// Avoid suspend/resume if there is no pending request.
- MutexLock mu(self, *Locks::deoptimization_lock_);
- if (gDeoptimizationRequests.empty()) {
+ MutexLock mu(self, *deoptimization_lock_);
+ if (deoptimization_requests_.empty()) {
return;
}
}
@@ -2557,82 +2654,135 @@ void Dbg::ManageDeoptimization() {
Runtime* const runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll();
const ThreadState old_state = self->SetStateUnsafe(kRunnable);
- ProcessDeoptimizationRequests();
+ {
+ MutexLock mu(self, *deoptimization_lock_);
+ for (const DeoptimizationRequest& request : deoptimization_requests_) {
+ ProcessDeoptimizationRequest(request);
+ }
+ deoptimization_requests_.clear();
+ }
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
runtime->GetThreadList()->ResumeAll();
self->TransitionFromSuspendedToRunnable();
}
-// Enable full deoptimization.
-void Dbg::EnableFullDeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- VLOG(jdwp) << "Request full deoptimization";
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, nullptr));
+static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ MethodHelper mh(m);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ if (code_item == nullptr) {
+ // TODO: We should not be asked to watch a location in a native or abstract method, so the
+ // code item should never be null. We could just check that we never encounter this case.
+ return false;
+ }
+ SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+ &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+ m->GetAccessFlags(), false, true);
+ // Note: we don't need to verify the method.
+ return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
}
-// Disable full deoptimization.
-void Dbg::DisableFullDeoptimization() {
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- VLOG(jdwp) << "Request full undeoptimization";
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, nullptr));
+static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
+ for (const Breakpoint& breakpoint : gBreakpoints) {
+ if (breakpoint.method == m) {
+ return &breakpoint;
+ }
+ }
+ return nullptr;
}
-void Dbg::WatchLocation(const JDWP::JdwpLocation* location) {
- bool need_deoptimization = true;
- mirror::ArtMethod* m = FromMethodId(location->method_id);
- {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-
- // If there is no breakpoint on this method yet, we need to deoptimize it.
+// Sanity checks all existing breakpoints on the same method.
+static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
+ if (kIsDebugBuild) {
for (const Breakpoint& breakpoint : gBreakpoints) {
- if (breakpoint.method == m) {
- // We already set a breakpoint on this method, hence we deoptimized it.
- DCHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
- need_deoptimization = false;
- break;
- }
+ CHECK_EQ(need_full_deoptimization, breakpoint.need_full_deoptimization);
+ }
+ if (need_full_deoptimization) {
+ // We should have deoptimized everything but not "selectively" deoptimized this method.
+ CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
+ CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ } else {
+ // We should have "selectively" deoptimized this method.
+ // Note: while we have not deoptimized everything for this method, we may have done it for
+ // another event.
+ CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
}
-
- gBreakpoints.push_back(Breakpoint(m, location->dex_pc));
- VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1];
}
+}
- if (need_deoptimization) {
- // Request its deoptimization. This will be done after updating the JDWP event list.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(true, m));
- VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(m);
+// Installs a breakpoint at the specified location. Also indicates through the deoptimization
+// request if we need to deoptimize.
+void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+ Thread* const self = Thread::Current();
+ mirror::ArtMethod* m = FromMethodId(location->method_id);
+ DCHECK(m != nullptr) << "No method for method id " << location->method_id;
+
+ MutexLock mu(self, *Locks::breakpoint_lock_);
+ const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
+ bool need_full_deoptimization;
+ if (existing_breakpoint == nullptr) {
+ // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
+ // inlined, we deoptimize everything; otherwise we deoptimize only this method.
+ need_full_deoptimization = IsMethodPossiblyInlined(self, m);
+ if (need_full_deoptimization) {
+ req->kind = DeoptimizationRequest::kFullDeoptimization;
+ req->method = nullptr;
+ } else {
+ req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
+ req->method = m;
+ }
+ } else {
+ // There is at least one breakpoint for this method: we don't need to deoptimize.
+ req->kind = DeoptimizationRequest::kNothing;
+ req->method = nullptr;
+
+ need_full_deoptimization = existing_breakpoint->need_full_deoptimization;
+ SanityCheckExistingBreakpoints(m, need_full_deoptimization);
}
+
+ gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
+ VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
+ << gBreakpoints[gBreakpoints.size() - 1];
}
-void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) {
- bool can_undeoptimize = true;
+// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
+// request if we need to undeoptimize.
+void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
mirror::ArtMethod* m = FromMethodId(location->method_id);
- DCHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
- {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
- if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
- VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
- gBreakpoints.erase(gBreakpoints.begin() + i);
- break;
- }
- }
+ DCHECK(m != nullptr) << "No method for method id " << location->method_id;
- // If there is no breakpoint on this method, we can undeoptimize it.
- for (const Breakpoint& breakpoint : gBreakpoints) {
- if (breakpoint.method == m) {
- can_undeoptimize = false;
- break;
- }
+ MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ bool need_full_deoptimization = false;
+ for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
+ if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
+ VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
+ need_full_deoptimization = gBreakpoints[i].need_full_deoptimization;
+ DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
+ gBreakpoints.erase(gBreakpoints.begin() + i);
+ break;
}
}
-
- if (can_undeoptimize) {
- // Request its undeoptimization. This will be done after updating the JDWP event list.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
- gDeoptimizationRequests.push_back(MethodInstrumentationRequest(false, m));
- VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(m);
+ const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
+ if (existing_breakpoint == nullptr) {
+ // There is no more breakpoint on this method: we need to undeoptimize.
+ if (need_full_deoptimization) {
+ // This method required full deoptimization: we need to undeoptimize everything.
+ req->kind = DeoptimizationRequest::kFullUndeoptimization;
+ req->method = nullptr;
+ } else {
+ // This method required selective deoptimization: we need to undeoptimize only that method.
+ req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
+ req->method = m;
+ }
+ } else {
+ // There is at least one breakpoint for this method: we don't need to undeoptimize.
+ req->kind = DeoptimizationRequest::kNothing;
+ req->method = nullptr;
+ SanityCheckExistingBreakpoints(m, need_full_deoptimization);
}
}
@@ -3072,7 +3222,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
// Translate the method through the vtable, unless the debugger wants to suppress it.
SirtRef<mirror::ArtMethod> m(soa.Self(), pReq->method);
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
- mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(pReq->method);
+ mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.get());
if (actual_method != m.get()) {
VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.get()) << " to " << PrettyMethod(actual_method);
m.reset(actual_method);
@@ -3085,7 +3235,7 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(pReq->method),
+ pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.get()),
reinterpret_cast<jvalue*>(pReq->arg_values));
mirror::Throwable* exception = soa.Self()->GetException(NULL);
@@ -3731,22 +3881,27 @@ static size_t GetAllocTrackerMax() {
}
void Dbg::SetAllocTrackingEnabled(bool enabled) {
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
if (enabled) {
- if (recent_allocation_records_ == NULL) {
- alloc_record_max_ = GetAllocTrackerMax();
- LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
- << kMaxAllocRecordStackDepth << " frames, taking "
- << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
- alloc_record_head_ = alloc_record_count_ = 0;
- recent_allocation_records_ = new AllocRecord[alloc_record_max_];
- CHECK(recent_allocation_records_ != NULL);
+ {
+ MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ if (recent_allocation_records_ == NULL) {
+ alloc_record_max_ = GetAllocTrackerMax();
+ LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
+ << kMaxAllocRecordStackDepth << " frames, taking "
+ << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
+ alloc_record_head_ = alloc_record_count_ = 0;
+ recent_allocation_records_ = new AllocRecord[alloc_record_max_];
+ CHECK(recent_allocation_records_ != NULL);
+ }
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
} else {
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
- delete[] recent_allocation_records_;
- recent_allocation_records_ = NULL;
+ {
+ MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ delete[] recent_allocation_records_;
+ recent_allocation_records_ = NULL;
+ }
}
}
@@ -3861,7 +4016,7 @@ void Dbg::DumpRecentAllocations() {
}
}
-void Dbg::UpdateObjectPointers(IsMarkedCallback* visitor, void* arg) {
+void Dbg::UpdateObjectPointers(IsMarkedCallback* callback, void* arg) {
if (recent_allocation_records_ != nullptr) {
MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
size_t i = HeadIndex();
@@ -3869,12 +4024,12 @@ void Dbg::UpdateObjectPointers(IsMarkedCallback* visitor, void* arg) {
while (count--) {
AllocRecord* record = &recent_allocation_records_[i];
DCHECK(record != nullptr);
- record->UpdateObjectPointers(visitor, arg);
+ record->UpdateObjectPointers(callback, arg);
i = (i + 1) & (alloc_record_max_ - 1);
}
}
if (gRegistry != nullptr) {
- gRegistry->UpdateObjectPointers(visitor, arg);
+ gRegistry->UpdateObjectPointers(callback, arg);
}
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 6569cc4..23c9c6a 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -25,6 +25,7 @@
#include <set>
#include <string>
+#include <vector>
#include "jdwp/jdwp.h"
#include "jni.h"
@@ -81,6 +82,9 @@ struct DebugInvokeReq {
Mutex lock DEFAULT_MUTEX_ACQUIRED_AFTER;
ConditionVariable cond GUARDED_BY(lock);
+ void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
};
@@ -111,10 +115,32 @@ struct SingleStepControl {
// single-step depth.
int stack_depth;
+ void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
};
+struct DeoptimizationRequest {
+ enum Kind {
+ kNothing, // no action.
+ kFullDeoptimization, // deoptimize everything.
+ kFullUndeoptimization, // undeoptimize everything.
+ kSelectiveDeoptimization, // deoptimize one method.
+ kSelectiveUndeoptimization // undeoptimize one method.
+ };
+
+ DeoptimizationRequest() : kind(kNothing), method(nullptr) {}
+
+ void VisitRoots(RootCallback* callback, void* arg);
+
+ Kind kind;
+
+ // Method for selective deoptimization.
+ mirror::ArtMethod* method;
+};
+
class Dbg {
public:
static bool ParseJdwpOptions(const std::string& options);
@@ -138,8 +164,8 @@ class Dbg {
*/
static void Connected();
static void GoActive()
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_);
- static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_);
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_, deoptimization_lock_, Locks::mutator_lock_);
+ static void Disconnected() LOCKS_EXCLUDED(deoptimization_lock_, Locks::mutator_lock_);
static void Disposed();
// Returns true if we're actually debugging with a real debugger, false if it's
@@ -401,26 +427,23 @@ class Dbg {
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Full Deoptimization control. Only used for method entry/exit and single-stepping.
- static void EnableFullDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void DisableFullDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
+ // Records deoptimization request in the queue.
+ static void RequestDeoptimization(const DeoptimizationRequest& req)
+ LOCKS_EXCLUDED(deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Manage deoptimization after updating JDWP events list. This must be done while all mutator
- // threads are suspended.
+ // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
+ // request and finally resumes all threads.
static void ManageDeoptimization()
- LOCKS_EXCLUDED(Locks::deoptimization_lock_)
+ LOCKS_EXCLUDED(deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Breakpoints.
- static void WatchLocation(const JDWP::JdwpLocation* pLoc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_)
+ static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void UnwatchLocation(const JDWP::JdwpLocation* pLoc)
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_)
+ static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Single-stepping.
@@ -459,6 +482,9 @@ class Dbg {
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
/*
* Recent allocation tracking support.
*/
@@ -512,6 +538,9 @@ class Dbg {
static void PostThreadStartOrStop(Thread*, uint32_t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_);
@@ -519,6 +548,20 @@ class Dbg {
static size_t alloc_record_head_ GUARDED_BY(alloc_tracker_lock_);
static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
+ // Guards deoptimization requests.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
+
+ // Deoptimization requests to be processed each time the event list is updated. This is used when
+ // registering and unregistering events so we do not deoptimize while holding the event list
+ // lock.
+ static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
+
+ // Count the number of events requiring full deoptimization. When the counter is > 0, everything
+ // is deoptimized, otherwise everything is undeoptimized.
+ // Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully
+ // undeoptimize when the last event is unregistered (when the counter is set to 0).
+ static size_t full_deoptimization_event_count_ GUARDED_BY(deoptimization_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
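The full_deoptimization_event_count_ counter turns full deoptimization into a reference count: Dbg::RequestDeoptimization above only queues a kFullDeoptimization request on the 0 -> 1 transition and a kFullUndeoptimization request on the 1 -> 0 transition. A standalone sketch of that counting pattern (simplified illustration, not ART code):

    #include <cstddef>
    #include <vector>

    enum class Kind { kFullDeoptimization, kFullUndeoptimization };

    static size_t full_deopt_events = 0;   // stands in for full_deoptimization_event_count_
    static std::vector<Kind> pending;      // stands in for deoptimization_requests_

    void RegisterFullDeoptEvent() {
      if (full_deopt_events == 0) {
        pending.push_back(Kind::kFullDeoptimization);    // first event: deoptimize the world once
      }
      ++full_deopt_events;
    }

    void UnregisterFullDeoptEvent() {
      --full_deopt_events;
      if (full_deopt_events == 0) {
        pending.push_back(Kind::kFullUndeoptimization);  // last event gone: undeoptimize once
      }
    }
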
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 829ec4a..9e5f54c 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -200,7 +200,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char
}
ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) {
+ if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 18b93d4..04e85d2 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -19,6 +19,8 @@
#include "heap_bitmap.h"
+#include "space_bitmap-inl.h"
+
namespace art {
namespace gc {
namespace accounting {
@@ -34,6 +36,55 @@ inline void HeapBitmap::Visit(const Visitor& visitor) {
}
}
+inline bool HeapBitmap::Test(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->Test(obj);
+ } else {
+ return GetDiscontinuousSpaceObjectSet(obj) != NULL;
+ }
+}
+
+inline void HeapBitmap::Clear(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ bitmap->Clear(obj);
+ } else {
+ ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Clear(obj);
+ }
+}
+
+inline void HeapBitmap::Set(const mirror::Object* obj) {
+ SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != NULL)) {
+ bitmap->Set(obj);
+ } else {
+ ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
+ DCHECK(set != NULL);
+ set->Set(obj);
+ }
+}
+
+inline SpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
+ for (const auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap->HasAddress(obj)) {
+ return bitmap;
+ }
+ }
+ return nullptr;
+}
+
+inline ObjectSet* HeapBitmap::GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const {
+ for (const auto& space_set : discontinuous_space_sets_) {
+ if (space_set->Test(obj)) {
+ return space_set;
+ }
+ }
+ return nullptr;
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 7cfeb63..f729c0e 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -31,57 +31,11 @@ namespace accounting {
class HeapBitmap {
public:
- typedef std::vector<SpaceBitmap*, GcAllocator<SpaceBitmap*> > SpaceBitmapVector;
- typedef std::vector<ObjectSet*, GcAllocator<ObjectSet*> > ObjectSetVector;
-
- bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- return bitmap->Test(obj);
- } else {
- return GetDiscontinuousSpaceObjectSet(obj) != NULL;
- }
- }
-
- void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Clear(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Clear(obj);
- }
- }
-
- void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
- SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Set(obj);
- }
- }
-
- SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap->HasAddress(obj)) {
- return bitmap;
- }
- }
- return NULL;
- }
-
- ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
- for (const auto& space_set : discontinuous_space_sets_) {
- if (space_set->Test(obj)) {
- return space_set;
- }
- }
- return NULL;
- }
+ bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
+ ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -110,10 +64,10 @@ class HeapBitmap {
void RemoveDiscontinuousObjectSet(ObjectSet* set);
// Bitmaps covering continuous spaces.
- SpaceBitmapVector continuous_space_bitmaps_;
+ std::vector<SpaceBitmap*, GcAllocator<SpaceBitmap*>> continuous_space_bitmaps_;
// Sets covering discontinuous spaces.
- ObjectSetVector discontinuous_space_sets_;
+ std::vector<ObjectSet*, GcAllocator<ObjectSet*>> discontinuous_space_sets_;
friend class art::gc::Heap;
};
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index 19c6768..c756127 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -32,41 +32,8 @@ class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(); it != spaces.end(); ++it) {
- if ((*it)->Contains(ref)) {
- return (*it)->IsMallocSpace();
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
- }
-};
-
-// A mod-union table to record Zygote references to the alloc space.
-class ModUnionTableToAllocspace : public ModUnionTableReferenceCache {
- public:
- explicit ModUnionTableToAllocspace(const std::string& name, Heap* heap,
- space::ContinuousSpace* space)
- : ModUnionTableReferenceCache(name, heap, space) {}
-
- bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) ALWAYS_INLINE {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(); it != spaces.end(); ++it) {
- space::ContinuousSpace* space = *it;
- if (space->Contains(ref)) {
- // The allocation space is always considered for collection whereas the Zygote space is
- // only considered for full GC.
- return space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
- }
- }
- // Assume it points to a large object.
- // TODO: Check.
- return true;
+ bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE ALWAYS_INLINE {
+ return !space_->HasAddress(ref);
}
};
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 8871921..34ca654 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -70,37 +70,29 @@ class ModUnionClearCardVisitor {
class ModUnionUpdateObjectReferencesVisitor {
public:
- ModUnionUpdateObjectReferencesVisitor(MarkObjectCallback* callback, void* arg)
+ ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg)
: callback_(callback),
arg_(arg) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, Object* ref, const MemberOffset& offset,
- bool /* is_static */) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
- if (ref != nullptr) {
- Object* new_ref = callback_(ref, arg_);
- if (new_ref != ref) {
- // Use SetFieldObjectWithoutWriteBarrier to avoid card mark as an optimization which
- // reduces dirtied pages and improves performance.
- if (Runtime::Current()->IsActiveTransaction()) {
- obj->SetFieldObjectWithoutWriteBarrier<true>(offset, new_ref, true);
- } else {
- obj->SetFieldObjectWithoutWriteBarrier<false>(offset, new_ref, true);
- }
- }
+ mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ if (obj_ptr->AsMirrorPtr() != nullptr) {
+ callback_(obj_ptr, arg_);
}
}
private:
- MarkObjectCallback* const callback_;
+ MarkHeapReferenceCallback* const callback_;
void* arg_;
};
class ModUnionScanImageRootVisitor {
public:
- ModUnionScanImageRootVisitor(MarkObjectCallback* callback, void* arg)
+ ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg)
: callback_(callback), arg_(arg) {}
void operator()(Object* root) const
@@ -108,11 +100,11 @@ class ModUnionScanImageRootVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
- collector::MarkSweep::VisitObjectReferences(root, ref_visitor, true);
+ root->VisitReferences<kMovingClasses>(ref_visitor);
}
private:
- MarkObjectCallback* const callback_;
+ MarkHeapReferenceCallback* const callback_;
void* const arg_;
};
@@ -131,12 +123,14 @@ class AddToReferenceArrayVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, Object* ref, const MemberOffset& offset,
- bool /* is_static */) const {
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
- if (ref != nullptr && mod_union_table_->AddReference(obj, ref)) {
+ if (ref != nullptr && mod_union_table_->ShouldAddReference(ref)) {
// Push the address of the reference.
- references_->push_back(obj->GetFieldObjectReferenceAddr(offset));
+ references_->push_back(ref_ptr);
}
}
@@ -155,11 +149,10 @@ class ModUnionReferenceVisitor {
void operator()(Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(obj != NULL);
// We don't have an early exit since we use the visitor pattern; an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+ obj->VisitReferences<kMovingClasses>(visitor);
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
@@ -175,20 +168,22 @@ class CheckReferenceVisitor {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, Object* ref,
- const MemberOffset& /* offset */, bool /* is_static */) const
+ void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- Heap* heap = mod_union_table_->GetHeap();
- if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
references_.find(ref) == references_.end()) {
+ Heap* heap = mod_union_table_->GetHeap();
space::ContinuousSpace* from_space = heap->FindContinuousSpaceFromObject(obj, false);
space::ContinuousSpace* to_space = heap->FindContinuousSpaceFromObject(ref, false);
- LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
- << "References " << reinterpret_cast<const void*>(ref)
- << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
- LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
- LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
- mod_union_table_->GetHeap()->DumpSpaces();
+ LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj)
+ << ")" << "References " << reinterpret_cast<const void*>(ref) << "(" << PrettyTypeOf(ref)
+ << ") without being in mod-union table";
+ LOG(INFO) << "FromSpace " << from_space->GetName() << " type "
+ << from_space->GetGcRetentionPolicy();
+ LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
+ << to_space->GetGcRetentionPolicy();
+ heap->DumpSpaces();
LOG(FATAL) << "FATAL ERROR";
}
}
@@ -208,9 +203,8 @@ class ModUnionCheckReferences {
void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- DCHECK(obj != NULL);
CheckReferenceVisitor visitor(mod_union_table_, references_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+ obj->VisitReferences<kMovingClasses>(visitor);
}
private:
@@ -264,10 +258,9 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
}
}
-void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectCallback* callback,
+void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
- Heap* heap = GetHeap();
- CardTable* card_table = heap->GetCardTable();
+ CardTable* card_table = heap_->GetCardTable();
std::vector<mirror::HeapReference<Object>*> cards_references;
ModUnionReferenceVisitor add_visitor(this, &cards_references);
@@ -277,7 +270,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectCallback* ca
cards_references.clear();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ auto* space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
DCHECK(space != nullptr);
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, add_visitor);
@@ -298,14 +291,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectCallback* ca
size_t count = 0;
for (const auto& ref : references_) {
for (mirror::HeapReference<Object>* obj_ptr : ref.second) {
- Object* obj = obj_ptr->AsMirrorPtr();
- if (obj != nullptr) {
- Object* new_obj = callback(obj, arg);
- // Avoid dirtying pages in the image unless necessary.
- if (new_obj != obj) {
- obj_ptr->Assign(new_obj);
- }
- }
+ callback(obj_ptr, arg);
}
count += ref.second.size();
}
@@ -322,7 +308,8 @@ void ModUnionTableCardCache::ClearCards() {
}
// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectCallback* callback, void* arg) {
+void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
+ void* arg) {
CardTable* card_table = heap_->GetCardTable();
ModUnionScanImageRootVisitor scan_visitor(callback, arg);
SpaceBitmap* bitmap = space_->GetLiveBitmap();
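The visitor changes in this file swap a MarkObjectCallback that returns a possibly-moved object (which the caller then writes back, deciding about card dirtying) for a MarkHeapReferenceCallback that receives the address of the reference slot and may update it in place. A sketch of the difference in the two protocols, using toy Obj/HeapRef stand-ins rather than the real mirror::HeapReference:

    #include <cstdio>

    struct Obj { int id; };

    // Hypothetical stand-in for mirror::HeapReference<Object>: a mutable slot.
    struct HeapRef {
      Obj* ptr;
      Obj* AsMirrorPtr() const { return ptr; }
      void Assign(Obj* p) { ptr = p; }
    };

    // Old style: callback returns the (possibly moved) object, caller writes it back.
    using MarkObjectCallback = Obj* (*)(Obj* obj, void* arg);
    // New style: callback gets the slot itself and updates it as a side effect.
    using MarkHeapReferenceCallback = void (*)(HeapRef* ref, void* arg);

    Obj moved = {2};
    Obj* MarkOld(Obj* /*obj*/, void* /*arg*/) { return &moved; }       // pretend the object moved
    void MarkNew(HeapRef* ref, void* /*arg*/) { ref->Assign(&moved); } // update the slot directly

    int main() {
      Obj original = {1};
      HeapRef slot = {&original};

      // Old protocol: two steps, and the caller decides whether to dirty the card.
      Obj* new_obj = MarkOld(slot.AsMirrorPtr(), nullptr);
      if (new_obj != slot.AsMirrorPtr()) slot.Assign(new_obj);

      // New protocol: one call, the write-back logic lives inside the callback.
      MarkNew(&slot, nullptr);
      std::printf("slot now points at id=%d\n", slot.AsMirrorPtr()->id);
      return 0;
    }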
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 2e22a11..c3a90e2 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -69,7 +69,7 @@ class ModUnionTable {
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
// before a call to update, for example, back-to-back sticky GCs. Also mark references to other
// spaces which are stored in the mod-union table.
- virtual void UpdateAndMarkReferences(MarkObjectCallback* callback, void* arg) = 0;
+ virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) = 0;
// Verification, sanity checks that we don't have clean cards which conflict with our cached data
// for said cards. Exclusive lock is required since verify sometimes uses
@@ -106,7 +106,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
void ClearCards();
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkObjectCallback* callback, void* arg)
+ void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -117,7 +117,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
- virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
+ virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -142,7 +142,7 @@ class ModUnionTableCardCache : public ModUnionTable {
void ClearCards();
// Mark all references to the alloc space(s).
- void UpdateAndMarkReferences(MarkObjectCallback* callback, void* arg)
+ void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index e6508dc..56f7caa 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -60,28 +60,25 @@ void RememberedSet::ClearCards() {
class RememberedSetReferenceVisitor {
public:
- RememberedSetReferenceVisitor(MarkObjectCallback* callback, space::ContinuousSpace* target_space,
+ RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback,
+ space::ContinuousSpace* target_space,
bool* const contains_reference_to_target_space, void* arg)
: callback_(callback), target_space_(target_space), arg_(arg),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
- void operator()(mirror::Object* obj, mirror::Object* ref,
- const MemberOffset& offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (ref != nullptr) {
- if (target_space_->HasAddress(ref)) {
- *contains_reference_to_target_space_ = true;
- mirror::Object* new_ref = callback_(ref, arg_);
- DCHECK(!target_space_->HasAddress(new_ref));
- if (new_ref != ref) {
- obj->SetFieldObjectWithoutWriteBarrier<false>(offset, new_ref, false);
- }
- }
+ DCHECK(obj != nullptr);
+ mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
+ *contains_reference_to_target_space_ = true;
+ callback_(ref_ptr, arg_);
+ DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
}
}
private:
- MarkObjectCallback* const callback_;
+ MarkHeapReferenceCallback* const callback_;
space::ContinuousSpace* const target_space_;
void* const arg_;
bool* const contains_reference_to_target_space_;
@@ -89,27 +86,27 @@ class RememberedSetReferenceVisitor {
class RememberedSetObjectVisitor {
public:
- RememberedSetObjectVisitor(MarkObjectCallback* callback, space::ContinuousSpace* target_space,
+ RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback,
+ space::ContinuousSpace* target_space,
bool* const contains_reference_to_target_space, void* arg)
: callback_(callback), target_space_(target_space), arg_(arg),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(obj != NULL);
RememberedSetReferenceVisitor ref_visitor(callback_, target_space_,
contains_reference_to_target_space_, arg_);
- collector::MarkSweep::VisitObjectReferences(obj, ref_visitor, true);
+ obj->VisitReferences<kMovingClasses>(ref_visitor);
}
private:
- MarkObjectCallback* const callback_;
+ MarkHeapReferenceCallback* const callback_;
space::ContinuousSpace* const target_space_;
void* const arg_;
bool* const contains_reference_to_target_space_;
};
-void RememberedSet::UpdateAndMarkReferences(MarkObjectCallback* callback,
+void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
space::ContinuousSpace* target_space, void* arg) {
CardTable* card_table = heap_->GetCardTable();
bool contains_reference_to_target_space = false;
@@ -155,7 +152,8 @@ void RememberedSet::AssertAllDirtyCardsAreWithinSpace() const {
for (const byte* card_addr : dirty_cards_) {
auto start = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
auto end = start + CardTable::kCardSize;
- DCHECK(space_->Begin() <= start && end <= space_->End());
+ DCHECK_LE(space_->Begin(), start);
+ DCHECK_LE(end, space_->Limit());
}
}
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 92feeb1..4ed20dd 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -52,7 +52,7 @@ class RememberedSet {
void ClearCards();
// Mark through all references to the target space.
- void UpdateAndMarkReferences(MarkObjectCallback* callback,
+ void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
space::ContinuousSpace* target_space, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index ace9f9e..19fdc63 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1652,6 +1652,30 @@ void RosAlloc::RevokeAllThreadLocalRuns() {
}
}
+void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ Thread* self = Thread::Current();
+ // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
+ WriterMutexLock wmu(self, bulk_free_lock_);
+ for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
+ MutexLock mu(self, *size_bracket_locks_[idx]);
+ Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
+ DCHECK(thread_local_run == nullptr);
+ }
+ }
+}
+
+void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ for (Thread* t : thread_list) {
+ AssertThreadLocalRunsAreRevoked(t);
+ }
+ }
+}
+
void RosAlloc::Initialize() {
// Check the consistency of the number of size brackets.
DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 738d917..0b4b189 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -549,6 +549,10 @@ class RosAlloc {
void RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ // Assert the thread local runs of a thread are revoked.
+ void AssertThreadLocalRunsAreRevoked(Thread* thread);
+ // Assert all the thread local runs are revoked.
+ void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
// Dumps the page map for debugging.
std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
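Both new RosAlloc asserts wrap their whole body in an `if (kIsDebugBuild)` guard, so the checks (and their lock acquisitions) are compiled and type-checked everywhere but optimized away in release builds. A minimal sketch of that pattern with placeholder names (Thread and thread_local_runs here are not the real RosAlloc members):

    #include <cassert>
    #include <vector>

    // Compile-time flag; in ART this role is played by kIsDebugBuild.
    static constexpr bool kIsDebugBuild = true;

    struct Thread {
      std::vector<void*> thread_local_runs;  // stand-in for rosalloc_runs_
    };

    // Because the guard is a constexpr bool, a release build drops the loop entirely,
    // yet the code still compiles and stays in sync with the surrounding types.
    void AssertThreadLocalRunsAreRevoked(const Thread& thread) {
      if (kIsDebugBuild) {
        for (void* run : thread.thread_local_runs) {
          assert(run == nullptr && "thread-local run was not revoked");
        }
      }
    }

    int main() {
      Thread t;
      t.thread_local_runs.assign(4, nullptr);
      AssertThreadLocalRunsAreRevoked(t);  // passes: all runs revoked
      return 0;
    }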
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
new file mode 100644
index 0000000..079eeba
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "concurrent_copying.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+} // namespace collector
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
new file mode 100644
index 0000000..ab26a9c
--- /dev/null
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
+#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
+
+#include "garbage_collector.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+class ConcurrentCopying : public GarbageCollector {
+ public:
+ explicit ConcurrentCopying(Heap* heap, bool generational = false,
+ const std::string& name_prefix = "")
+ : GarbageCollector(heap,
+ name_prefix + (name_prefix.empty() ? "" : " ") +
+ "concurrent copying + mark sweep") {}
+
+ ~ConcurrentCopying() {}
+
+ virtual void InitializePhase() OVERRIDE {}
+ virtual void MarkingPhase() OVERRIDE {}
+ virtual void ReclaimPhase() OVERRIDE {}
+ virtual void FinishPhase() OVERRIDE {}
+ virtual GcType GetGcType() const OVERRIDE {
+ return kGcTypePartial;
+ }
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return kCollectorTypeCC;
+ }
+ virtual void RevokeAllThreadLocalBuffers() OVERRIDE {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConcurrentCopying);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 1e1e447..07951e0 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -39,17 +39,14 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
name_(name),
gc_cause_(kGcCauseForAlloc),
clear_soft_references_(false),
- verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
- timings_(name_.c_str(), true, verbose_),
+ timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
cumulative_timings_(name) {
ResetCumulativeStatistics();
}
-bool GarbageCollector::HandleDirtyObjectsPhase() {
- DCHECK(IsConcurrent());
- return true;
+void GarbageCollector::PausePhase() {
}
void GarbageCollector::RegisterPause(uint64_t nano_length) {
@@ -64,12 +61,6 @@ void GarbageCollector::ResetCumulativeStatistics() {
total_freed_bytes_ = 0;
}
-void GarbageCollector::RevokeAllThreadLocalBuffers() {
- timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
- GetHeap()->RevokeAllThreadLocalBuffers();
- timings_.EndSplit();
-}
-
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
@@ -85,50 +76,58 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
freed_objects_ = 0;
freed_large_objects_ = 0;
- InitializePhase();
-
- if (!IsConcurrent()) {
- // Pause is the entire length of the GC.
- uint64_t pause_start = NanoTime();
- ATRACE_BEGIN("Application threads suspended");
- // Mutator lock may be already exclusively held when we do garbage collections for changing the
- // current collector / allocator during process state updates.
- if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
- // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
- RevokeAllThreadLocalBuffers();
- MarkingPhase();
- ReclaimPhase();
- // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
- } else {
- thread_list->SuspendAll();
- GetHeap()->PreGcRosAllocVerification(&timings_);
- RevokeAllThreadLocalBuffers();
- MarkingPhase();
- ReclaimPhase();
- GetHeap()->PostGcRosAllocVerification(&timings_);
- thread_list->ResumeAll();
- }
- ATRACE_END();
- RegisterPause(NanoTime() - pause_start);
- } else {
- CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
+ CollectorType collector_type = GetCollectorType();
+ switch (collector_type) {
+ case kCollectorTypeMS: // Fall through.
+ case kCollectorTypeSS: // Fall through.
+ case kCollectorTypeGSS: {
+ InitializePhase();
+ // Pause is the entire length of the GC.
+ uint64_t pause_start = NanoTime();
+ ATRACE_BEGIN("Application threads suspended");
+ // Mutator lock may be already exclusively held when we do garbage collections for changing
+ // the current collector / allocator during process state updates.
+ if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ // PreGcRosAllocVerification() is called in Heap::TransitionCollector().
+ RevokeAllThreadLocalBuffers();
+ MarkingPhase();
+ PausePhase();
+ ReclaimPhase();
+ // PostGcRosAllocVerification() is called in Heap::TransitionCollector().
+ } else {
+ ATRACE_BEGIN("Suspending mutator threads");
+ thread_list->SuspendAll();
+ ATRACE_END();
+ GetHeap()->PreGcRosAllocVerification(&timings_);
+ RevokeAllThreadLocalBuffers();
+ MarkingPhase();
+ PausePhase();
+ ReclaimPhase();
+ GetHeap()->PostGcRosAllocVerification(&timings_);
+ ATRACE_BEGIN("Resuming mutator threads");
+ thread_list->ResumeAll();
+ ATRACE_END();
+ }
+ ATRACE_END();
+ RegisterPause(NanoTime() - pause_start);
+ FinishPhase();
+ break;
}
- bool done = false;
- while (!done) {
+ case kCollectorTypeCMS: {
+ InitializePhase();
+ CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ MarkingPhase();
+ }
uint64_t pause_start = NanoTime();
ATRACE_BEGIN("Suspending mutator threads");
thread_list->SuspendAll();
ATRACE_END();
ATRACE_BEGIN("All mutator threads suspended");
GetHeap()->PreGcRosAllocVerification(&timings_);
- done = HandleDirtyObjectsPhase();
- if (done) {
- RevokeAllThreadLocalBuffers();
- }
+ PausePhase();
+ RevokeAllThreadLocalBuffers();
GetHeap()->PostGcRosAllocVerification(&timings_);
ATRACE_END();
uint64_t pause_end = NanoTime();
@@ -136,13 +135,27 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
thread_list->ResumeAll();
ATRACE_END();
RegisterPause(pause_end - pause_start);
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ ReclaimPhase();
+ }
+ FinishPhase();
+ break;
+ }
+ case kCollectorTypeCC: {
+ // To be implemented.
+ break;
}
- {
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- ReclaimPhase();
+ default: {
+ LOG(FATAL) << "Unreachable collector type=" << static_cast<size_t>(collector_type);
+ break;
}
}
- FinishPhase();
+ // Add the current timings to the cumulative timings.
+ cumulative_timings_.AddLogger(timings_);
+ // Update cumulative statistics with how many bytes the GC iteration freed.
+ total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+ total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
uint64_t end_time = NanoTime();
duration_ns_ = end_time - start_time;
total_time_ns_ += GetDurationNs();
@@ -181,6 +194,16 @@ void GarbageCollector::SwapBitmaps() {
}
}
+uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
+}
+
+uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (freed_bytes_ * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
} // namespace collector
} // namespace gc
} // namespace art
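Two ideas from this file are worth spelling out: Run() now chooses its pause structure by asking the collector for its CollectorType instead of a boolean IsConcurrent(), and the new throughput getters divide freed bytes by milliseconds plus one so a duration that rounds down to zero cannot divide by zero. A toy sketch of both (the printed phase descriptions and the byte counts are illustrative, not the real control flow):

    #include <cstdint>
    #include <cstdio>

    enum CollectorType { kCollectorTypeMS, kCollectorTypeSS, kCollectorTypeGSS,
                         kCollectorTypeCMS, kCollectorTypeCC };

    // Stop-the-world collectors pause for the whole collection; CMS pauses only
    // for its remark step; the concurrent-copying collector is still a stub.
    void RunCollection(CollectorType type) {
      switch (type) {
        case kCollectorTypeMS:
        case kCollectorTypeSS:
        case kCollectorTypeGSS:
          std::puts("suspend mutators -> mark -> pause phase -> reclaim -> resume");
          break;
        case kCollectorTypeCMS:
          std::puts("concurrent mark -> short pause (remark) -> concurrent reclaim");
          break;
        case kCollectorTypeCC:
          std::puts("to be implemented");
          break;
      }
    }

    // Same shape as GetEstimatedMeanThroughput(): bytes * 1000 / (ms + 1) = bytes/sec,
    // with the +1 guarding against a duration that rounds down to 0 ms.
    uint64_t EstimatedThroughput(uint64_t freed_bytes, uint64_t duration_ms) {
      return freed_bytes * 1000 / (duration_ms + 1);
    }

    int main() {
      RunCollection(kCollectorTypeCMS);
      // e.g. 8 MiB freed in 20 ms; the +1 makes the divisor 21 ms.
      std::printf("%llu bytes/sec\n",
                  static_cast<unsigned long long>(EstimatedThroughput(8u << 20, 20)));
      return 0;
    }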
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 8259cf0..ccfa9cf 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -20,6 +20,7 @@
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
+#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc_type.h"
#include <stdint.h>
@@ -34,9 +35,6 @@ namespace collector {
class GarbageCollector {
public:
- // Returns true iff the garbage collector is concurrent.
- virtual bool IsConcurrent() const = 0;
-
GarbageCollector(Heap* heap, const std::string& name);
virtual ~GarbageCollector() { }
@@ -46,6 +44,8 @@ class GarbageCollector {
virtual GcType GetGcType() const = 0;
+ virtual CollectorType GetCollectorType() const = 0;
+
// Run the garbage collector.
void Run(GcCause gc_cause, bool clear_soft_references);
@@ -68,8 +68,7 @@ class GarbageCollector {
TimingLogger& GetTimings() {
return timings_;
}
-
- CumulativeLogger& GetCumulativeTimings() {
+ const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
@@ -111,6 +110,17 @@ class GarbageCollector {
return pause_histogram_;
}
+ // Returns the estimated throughput in bytes / second.
+ uint64_t GetEstimatedMeanThroughput() const;
+
+ // Returns the estimated throughput of the last GC iteration.
+ uint64_t GetEstimatedLastIterationThroughput() const;
+
+ // Returns how many GC iterations have been run.
+ size_t GetIterations() const {
+ return GetCumulativeTimings().GetIterations();
+ }
+
protected:
// The initial phase. Done without mutators paused.
virtual void InitializePhase() = 0;
@@ -118,8 +128,8 @@ class GarbageCollector {
// Mark all reachable objects, done concurrently.
virtual void MarkingPhase() = 0;
- // Only called for concurrent GCs. Gets called repeatedly until it succeeds.
- virtual bool HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Only called for concurrent GCs.
+ virtual void PausePhase();
// Called with mutators running.
virtual void ReclaimPhase() = 0;
@@ -127,7 +137,8 @@ class GarbageCollector {
// Called after the GC is finished. Done without mutators paused.
virtual void FinishPhase() = 0;
- void RevokeAllThreadLocalBuffers();
+ // Revoke all the thread-local buffers.
+ virtual void RevokeAllThreadLocalBuffers() = 0;
static constexpr size_t kPauseBucketSize = 500;
static constexpr size_t kPauseBucketCount = 32;
@@ -139,8 +150,6 @@ class GarbageCollector {
GcCause gc_cause_;
bool clear_soft_references_;
- const bool verbose_;
-
uint64_t duration_ns_;
TimingLogger timings_;
diff --git a/runtime/gc/collector/immune_region.cc b/runtime/gc/collector/immune_region.cc
index 70a6213..3e1c944 100644
--- a/runtime/gc/collector/immune_region.cc
+++ b/runtime/gc/collector/immune_region.cc
@@ -28,8 +28,8 @@ ImmuneRegion::ImmuneRegion() {
}
void ImmuneRegion::Reset() {
- begin_ = nullptr;
- end_ = nullptr;
+ SetBegin(nullptr);
+ SetEnd(nullptr);
}
bool ImmuneRegion::AddContinuousSpace(space::ContinuousSpace* space) {
@@ -41,13 +41,13 @@ bool ImmuneRegion::AddContinuousSpace(space::ContinuousSpace* space) {
mirror::Object* space_begin = reinterpret_cast<mirror::Object*>(space->Begin());
mirror::Object* space_limit = reinterpret_cast<mirror::Object*>(space->Limit());
if (IsEmpty()) {
- begin_ = space_begin;
- end_ = space_limit;
+ SetBegin(space_begin);
+ SetEnd(space_limit);
} else {
if (space_limit <= begin_) { // Space is before the immune region.
- begin_ = space_begin;
+ SetBegin(space_begin);
} else if (space_begin >= end_) { // Space is after the immune region.
- end_ = space_limit;
+ SetEnd(space_limit);
} else {
return false;
}
diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h
index 21d0b43..0c0a89b 100644
--- a/runtime/gc/collector/immune_region.h
+++ b/runtime/gc/collector/immune_region.h
@@ -46,16 +46,29 @@ class ImmuneRegion {
bool ContainsSpace(const space::ContinuousSpace* space) const;
// Returns true if an object is inside of the immune region (assumed to be marked).
bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE {
- return obj >= begin_ && obj < end_;
+ // Note: Relies on integer underflow behavior.
+ return reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(begin_) < size_;
+ }
+ void SetBegin(mirror::Object* begin) {
+ begin_ = begin;
+ UpdateSize();
+ }
+ void SetEnd(mirror::Object* end) {
+ end_ = end;
+ UpdateSize();
}
private:
bool IsEmpty() const {
- return begin_ == end_;
+ return size_ == 0;
+ }
+ void UpdateSize() {
+ size_ = reinterpret_cast<uintptr_t>(end_) - reinterpret_cast<uintptr_t>(begin_);
}
mirror::Object* begin_;
mirror::Object* end_;
+ uintptr_t size_;
};
} // namespace collector
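The new ContainsObject folds the two-sided `begin_ <= obj && obj < end_` test into a single unsigned comparison: because the subtraction is done on uintptr_t, a pointer below begin_ wraps around to a huge value and fails the `< size_` check, so one compare covers both bounds. A self-contained sketch of the trick (ImmuneRegion here is a bare stand-in with made-up addresses, not the ART class):

    #include <cassert>
    #include <cstdint>

    struct ImmuneRegion {
      uintptr_t begin;
      uintptr_t size;  // end - begin, kept up to date whenever begin/end change

      // Single-compare range check: (obj - begin) underflows to a large unsigned
      // value when obj < begin, so "too low" and "too high" fail the same test.
      bool ContainsObject(uintptr_t obj) const {
        return obj - begin < size;
      }
    };

    int main() {
      ImmuneRegion r{0x2000, 0x1000};     // covers [0x2000, 0x3000)
      assert(r.ContainsObject(0x2000));   // at begin
      assert(r.ContainsObject(0x2fff));   // just below end
      assert(!r.ContainsObject(0x3000));  // at end -> outside
      assert(!r.ContainsObject(0x1fff));  // below begin: underflow makes the diff huge
      return 0;
    }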
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 4915532..974952d 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -21,130 +21,31 @@
#include "gc/heap.h"
#include "mirror/art_field.h"
-#include "mirror/class.h"
-#include "mirror/object_array.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/reference.h"
namespace art {
namespace gc {
namespace collector {
-template <typename MarkVisitor>
-inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor) {
+template<typename MarkVisitor, typename ReferenceVisitor>
+inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+ const ReferenceVisitor& ref_visitor) {
if (kIsDebugBuild && !IsMarked(obj)) {
heap_->DumpSpaces();
LOG(FATAL) << "Scanning unmarked object " << obj;
}
- // The GetClass verifies the object, don't need to reverify after.
- mirror::Class* klass = obj->GetClass();
- // IsArrayClass verifies klass.
- if (UNLIKELY(klass->IsArrayClass())) {
- if (kCountScannedTypes) {
- ++array_count_;
- }
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
- }
- } else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
- if (kCountScannedTypes) {
+ obj->VisitReferences<false>(visitor, ref_visitor);
+ if (kCountScannedTypes) {
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
++class_count_;
- }
- VisitClassReferences(klass, obj, visitor);
- } else {
- if (kCountScannedTypes) {
+ } else if (UNLIKELY(klass->IsArrayClass<kVerifyNone>())) {
+ ++array_count_;
+ } else {
++other_count_;
}
- VisitOtherReferences(klass, obj, visitor);
- if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
- DelayReferenceReferent(klass, obj);
- }
- }
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
- bool visit_class)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- mirror::Class* klass = obj->GetClass();
- if (klass->IsArrayClass()) {
- if (visit_class) {
- visitor(obj, klass, mirror::Object::ClassOffset(), false);
- }
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
- }
- } else if (klass == mirror::Class::GetJavaLangClass()) {
- DCHECK_EQ(klass->GetClass<kVerifyNone>(), mirror::Class::GetJavaLangClass());
- VisitClassReferences(klass, obj, visitor);
- } else {
- VisitOtherReferences(klass, obj, visitor);
- }
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets<kVerifyNone>(), false, visitor);
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- VisitInstanceFieldsReferences(klass, obj, visitor);
- VisitStaticFieldsReferences(obj->AsClass<kVerifyNone>(), visitor);
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets<kVerifyNone>(), true, visitor);
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets,
- bool is_static, const Visitor& visitor) {
- if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
- // Found a reference offset bitmap. Mark the specified offsets.
- while (ref_offsets != 0) {
- size_t right_shift = CLZ(ref_offsets);
- MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
- visitor(obj, ref, field_offset, is_static);
- ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
- }
- } else {
- // There is no reference offset bitmap. In the non-static case,
- // walk up the class inheritance hierarchy and find reference
- // offsets the hard way. In the static case, just consider this
- // class.
- for (mirror::Class* klass = is_static ? obj->AsClass<kVerifyNone>() : obj->GetClass<kVerifyNone>();
- klass != nullptr;
- klass = is_static ? nullptr : klass->GetSuperClass()) {
- size_t num_reference_fields = (is_static
- ? klass->NumReferenceStaticFields()
- : klass->NumReferenceInstanceFields());
- for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
- : klass->GetInstanceField(i));
- MemberOffset field_offset = field->GetOffset();
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
- visitor(obj, ref, field_offset, is_static);
- }
- }
- }
-}
-
-template <typename Visitor>
-inline void MarkSweep::VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
- const Visitor& visitor) {
- const size_t length = static_cast<size_t>(array->GetLength());
- for (size_t i = 0; i < length; ++i) {
- mirror::Object* element = array->GetWithoutChecks(static_cast<int32_t>(i));
- const size_t width = sizeof(mirror::HeapReference<mirror::Object>);
- MemberOffset offset(i * width + mirror::Array::DataOffset(width).Int32Value());
- visitor(array, element, offset, false);
}
}
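After this change the field-walking logic lives in mirror::Object::VisitReferences, and ScanObjectVisit only supplies two functors: one invoked per reference field and one for java.lang.ref.Reference referents. A stripped-down sketch of that two-visitor shape, with a toy Obj type standing in for the mirror classes and a flat field list standing in for the real reference-offset machinery:

    #include <cstdio>
    #include <vector>

    struct Obj {
      std::vector<Obj*> fields;   // ordinary reference fields
      Obj* referent = nullptr;    // non-null only for Reference-like objects

      // Stand-in for mirror::Object::VisitReferences: calls the field visitor for
      // every reference slot and the reference visitor if this is a Reference.
      template <typename FieldVisitor, typename RefVisitor>
      void VisitReferences(const FieldVisitor& field_visitor,
                           const RefVisitor& ref_visitor) {
        for (size_t i = 0; i < fields.size(); ++i) {
          field_visitor(this, i);
        }
        if (referent != nullptr) {
          ref_visitor(this);
        }
      }
    };

    int main() {
      Obj child, ref_target, root;
      root.fields = {&child, nullptr};
      root.referent = &ref_target;

      auto mark_visitor = [](Obj* holder, size_t slot) {
        std::printf("mark field %zu of %p\n", slot, static_cast<void*>(holder));
      };
      auto delay_referent_visitor = [](Obj* reference) {
        std::printf("delay referent of %p\n", static_cast<void*>(reference));
      };
      root.VisitReferences(mark_visitor, delay_referent_visitor);
      return 0;
    }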
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fe5a75f..91ccd64 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -27,31 +27,20 @@
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
-#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "indirect_reference_table.h"
-#include "intern_table.h"
-#include "jni_internal.h"
-#include "monitor.h"
#include "mark_sweep-inl.h"
-#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
-#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
-#include "verifier/method_verifier.h"
using ::art::mirror::ArtField;
using ::art::mirror::Class;
@@ -79,16 +68,20 @@ static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;
// Profiling and information flags.
-static constexpr bool kCountClassesMarked = false;
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountJavaLangRefs = false;
+static constexpr bool kCountMarkedObjects = false;
// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRoots = kIsDebugBuild;
+// If true, revoke the rosalloc thread-local buffers at the
+// checkpoint, as opposed to during the pause.
+static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
+
void MarkSweep::BindBitmaps() {
timings_.StartSplit("BindBitmaps");
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -105,9 +98,6 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
: GarbageCollector(heap,
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
- current_mark_bitmap_(NULL),
- mark_stack_(NULL),
- live_stack_freeze_size_(0),
gc_barrier_(new Barrier(0)),
large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
@@ -125,13 +115,20 @@ void MarkSweep::InitializePhase() {
other_count_ = 0;
large_object_test_ = 0;
large_object_mark_ = 0;
- classes_marked_ = 0;
overhead_time_ = 0;
work_chunks_created_ = 0;
work_chunks_deleted_ = 0;
reference_count_ = 0;
-
- FindDefaultMarkBitmap();
+ mark_null_count_ = 0;
+ mark_immune_count_ = 0;
+ mark_fastpath_count_ = 0;
+ mark_slowpath_count_ = 0;
+ FindDefaultSpaceBitmap();
+ {
+ // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ mark_bitmap_ = heap_->GetMarkBitmap();
+ }
// Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
@@ -145,54 +142,48 @@ void MarkSweep::ProcessReferences(Thread* self) {
&MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
}
-bool MarkSweep::HandleDirtyObjectsPhase() {
- TimingLogger::ScopedSplit split("(Paused)HandleDirtyObjectsPhase", &timings_);
+void MarkSweep::PreProcessReferences() {
+ if (IsConcurrent()) {
+ // No reason to do this for non-concurrent GC since pre processing soft references only helps
+ // pauses.
+ timings_.NewSplit("PreProcessReferences");
+ GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
+ &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
+ }
+}
+
+void MarkSweep::PausePhase() {
+ TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
-
- {
+ if (IsConcurrent()) {
+ // Handle the dirty objects if we are a concurrent GC.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-
// Re-mark root set.
ReMarkRoots();
-
// Scan dirty objects, this is only required if we are not doing concurrent GC.
RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
-
ProcessReferences(self);
-
- // Only need to do this if we have the card mark verification on, and only during concurrent GC.
- if (GetHeap()->verify_missing_card_marks_ || GetHeap()->verify_pre_gc_heap_||
- GetHeap()->verify_post_gc_heap_) {
+ {
+ timings_.NewSplit("SwapStacks");
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- // This second sweep makes sure that we don't have any objects in the live stack which point to
- // freed objects. These cause problems since their references may be previously freed objects.
- SweepArray(GetHeap()->allocation_stack_.get(), false);
- // Since SweepArray() above resets the (active) allocation
- // stack. Need to revoke the thread-local allocation stacks that
- // point into it.
+ heap_->SwapStacks(self);
+ live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
+ // Need to revoke all the thread local allocation stacks since we just swapped the allocation
+ // stacks and don't want anybody to allocate into the live stack.
RevokeAllThreadLocalAllocationStacks(self);
}
-
timings_.StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
timings_.EndSplit();
-
- // Ensure that nobody inserted items in the live stack after we swapped the stacks.
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
-
- // Disallow new system weaks to prevent a race which occurs when someone adds a new system
- // weak before we sweep them. Since this new system weak may not be marked, the GC may
- // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
- // reference to a string that is about to be swept.
- Runtime::Current()->DisallowNewSystemWeaks();
- return true;
-}
-
-bool MarkSweep::IsConcurrent() const {
- return is_concurrent_;
+ if (IsConcurrent()) {
+ // Disallow new system weaks to prevent a race which occurs when someone adds a new system
+ // weak before we sweep them. Since this new system weak may not be marked, the GC may
+ // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
+ // reference to a string that is about to be swept.
+ Runtime::Current()->DisallowNewSystemWeaks();
+ }
}
void MarkSweep::PreCleanCards() {
@@ -214,8 +205,7 @@ void MarkSweep::PreCleanCards() {
// reference write are visible to the GC before the card is scanned (this is due to locks being
// acquired / released in the checkpoint code).
// The other roots are also marked to help reduce the pause.
- MarkThreadRoots(self);
- // TODO: Only mark the dirty roots.
+ MarkRootsCheckpoint(self, false);
MarkNonThreadRoots();
MarkConcurrentRoots(
static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
@@ -228,6 +218,7 @@ void MarkSweep::PreCleanCards() {
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
if (kUseThreadLocalAllocationStack) {
+ timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
Locks::mutator_lock_->AssertExclusiveHeld(self);
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
@@ -238,23 +229,17 @@ void MarkSweep::MarkingPhase() {
Thread* self = Thread::Current();
BindBitmaps();
- FindDefaultMarkBitmap();
+ FindDefaultSpaceBitmap();
// Process dirty cards and add dirty cards to mod union tables.
heap_->ProcessCards(timings_, false);
- // Need to do this before the checkpoint since we don't want any threads to add references to
- // the live stack during the recursive mark.
- timings_.NewSplit("SwapStacks");
- heap_->SwapStacks(self);
-
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
- live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
- UpdateAndMarkModUnion();
MarkReachableObjects();
// Pre-clean dirtied cards to reduce pauses.
PreCleanCards();
+ PreProcessReferences();
}
void MarkSweep::UpdateAndMarkModUnion() {
@@ -265,23 +250,13 @@ void MarkSweep::UpdateAndMarkModUnion() {
TimingLogger::ScopedSplit split(name, &timings_);
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
- mod_union_table->UpdateAndMarkReferences(MarkObjectCallback, this);
+ mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
}
}
}
-void MarkSweep::MarkThreadRoots(Thread* self) {
- MarkRootsCheckpoint(self);
-}
-
void MarkSweep::MarkReachableObjects() {
- // Mark everything allocated since the last as GC live so that we can sweep concurrently,
- // knowing that new allocations won't be marked as live.
- timings_.StartSplit("MarkStackAsLive");
- accounting::ObjectStack* live_stack = heap_->GetLiveStack();
- heap_->MarkAllocStackAsLive(live_stack);
- live_stack->Reset();
- timings_.EndSplit();
+ UpdateAndMarkModUnion();
// Recursively mark all the non-image bits set in the mark bitmap.
RecursiveMark();
}
@@ -289,44 +264,10 @@ void MarkSweep::MarkReachableObjects() {
void MarkSweep::ReclaimPhase() {
TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
Thread* self = Thread::Current();
-
- if (!IsConcurrent()) {
- ProcessReferences(self);
- }
-
- {
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- SweepSystemWeaks();
- }
-
+ SweepSystemWeaks(self);
if (IsConcurrent()) {
Runtime::Current()->AllowNewSystemWeaks();
-
- TimingLogger::ScopedSplit split("UnMarkAllocStack", &timings_);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
- if (!kPreCleanCards) {
- // The allocation stack contains things allocated since the start of the GC. These may have
- // been marked during this GC meaning they won't be eligible for reclaiming in the next
- // sticky GC. Unmark these objects so that they are eligible for reclaiming in the next
- // sticky GC.
- // There is a race here which is safely handled. Another thread such as the hprof could
- // have flushed the alloc stack after we resumed the threads. This is safe however, since
- // reseting the allocation stack zeros it out with madvise. This means that we will either
- // read NULLs or attempt to unmark a newly allocated object which will not be marked in the
- // first place.
- // We can't do this if we pre-clean cards since we will unmark objects which are no longer on
- // a dirty card since we aged cards during the pre-cleaning process.
- mirror::Object** end = allocation_stack->End();
- for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
- const Object* obj = *it;
- if (obj != nullptr) {
- UnMarkObjectNonNull(obj);
- }
- }
- }
}
-
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -346,14 +287,13 @@ void MarkSweep::ReclaimPhase() {
}
}
-void MarkSweep::FindDefaultMarkBitmap() {
+void MarkSweep::FindDefaultSpaceBitmap() {
TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
if (bitmap != nullptr &&
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
- current_mark_bitmap_ = bitmap;
- CHECK(current_mark_bitmap_ != NULL);
+ current_space_bitmap_ = bitmap;
return;
}
}
@@ -379,7 +319,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
}
}
-inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
+inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
DCHECK(obj != NULL);
if (MarkObjectParallel(obj)) {
MutexLock mu(Thread::Current(), mark_stack_lock_);
@@ -387,7 +327,7 @@ inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
ExpandMarkStack();
}
// The object must be pushed on to the mark stack.
- mark_stack_->PushBack(const_cast<Object*>(obj));
+ mark_stack_->PushBack(obj);
}
}
@@ -397,68 +337,52 @@ mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
return obj;
}
-inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
- DCHECK(!immune_region_.ContainsObject(obj));
-
- if (kUseBrooksPointer) {
- // Verify all the objects have the correct Brooks pointer installed.
- obj->AssertSelfBrooksPointer();
- }
-
- // Try to take advantage of locality of references within a space, failing this find the space
- // the hard way.
- accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
- if (LIKELY(new_bitmap != NULL)) {
- object_bitmap = new_bitmap;
- } else {
- MarkLargeObject(obj, false);
- return;
- }
- }
-
- DCHECK(object_bitmap->HasAddress(obj));
- object_bitmap->Clear(obj);
+void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
+ reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}
-inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
- DCHECK(obj != NULL);
-
+inline void MarkSweep::MarkObjectNonNull(Object* obj) {
+ DCHECK(obj != nullptr);
if (kUseBrooksPointer) {
// Verify all the objects have the correct Brooks pointer installed.
obj->AssertSelfBrooksPointer();
}
-
if (immune_region_.ContainsObject(obj)) {
+ if (kCountMarkedObjects) {
+ ++mark_immune_count_;
+ }
DCHECK(IsMarked(obj));
return;
}
-
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
- accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
+ accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
- if (LIKELY(new_bitmap != NULL)) {
- object_bitmap = new_bitmap;
- } else {
+ object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
+ if (kCountMarkedObjects) {
+ ++mark_slowpath_count_;
+ }
+ if (UNLIKELY(object_bitmap == nullptr)) {
MarkLargeObject(obj, true);
return;
}
+ } else if (kCountMarkedObjects) {
+ ++mark_fastpath_count_;
}
-
// This object was not previously marked.
- if (!object_bitmap->Test(obj)) {
- object_bitmap->Set(obj);
- if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
- // Lock is not needed but is here anyways to please annotalysis.
- MutexLock mu(Thread::Current(), mark_stack_lock_);
- ExpandMarkStack();
- }
- // The object must be pushed on to the mark stack.
- mark_stack_->PushBack(const_cast<Object*>(obj));
+ if (!object_bitmap->Set(obj)) {
+ PushOnMarkStack(obj);
+ }
+}
+
+inline void MarkSweep::PushOnMarkStack(Object* obj) {
+ if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+ // Lock is not needed but is here anyways to please annotalysis.
+ MutexLock mu(Thread::Current(), mark_stack_lock_);
+ ExpandMarkStack();
}
+ // The object must be pushed on to the mark stack.
+ mark_stack_->PushBack(obj);
}
// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
@@ -490,23 +414,20 @@ bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
}
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
- DCHECK(obj != NULL);
-
+ DCHECK(obj != nullptr);
if (kUseBrooksPointer) {
// Verify all the objects have the correct Brooks pointer installed.
obj->AssertSelfBrooksPointer();
}
-
if (immune_region_.ContainsObject(obj)) {
DCHECK(IsMarked(obj));
return false;
}
-
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
- accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
+ accounting::SpaceBitmap* object_bitmap = current_space_bitmap_;
if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+ accounting::SpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
if (new_bitmap != NULL) {
object_bitmap = new_bitmap;
} else {
@@ -516,23 +437,20 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
return MarkLargeObject(obj, true);
}
}
-
// Return true if the object was not previously marked.
return !object_bitmap->AtomicTestAndSet(obj);
}
-// Used to mark objects when recursing. Recursion is done by moving
-// the finger across the bitmaps in address order and marking child
-// objects. Any newly-marked objects whose addresses are lower than
-// the finger won't be visited by the bitmap scan, so those objects
-// need to be added to the mark stack.
-inline void MarkSweep::MarkObject(const Object* obj) {
- if (obj != NULL) {
+// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
+inline void MarkSweep::MarkObject(Object* obj) {
+ if (obj != nullptr) {
MarkObjectNonNull(obj);
+ } else if (kCountMarkedObjects) {
+ ++mark_null_count_;
}
}
-void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
+void MarkSweep::MarkRootParallelCallback(Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}
@@ -577,7 +495,7 @@ void MarkSweep::MarkRoots(Thread* self) {
timings_.EndSplit();
RevokeAllThreadLocalAllocationStacks(self);
} else {
- MarkThreadRoots(self);
+ MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
// At this point the live stack should no longer have any mutators which push into it.
MarkNonThreadRoots();
MarkConcurrentRoots(
@@ -603,8 +521,8 @@ class ScanObjectVisitor {
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- // TODO: Fixme when anotatalysis works with visitors.
- void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -616,11 +534,26 @@ class ScanObjectVisitor {
MarkSweep* const mark_sweep_;
};
+class DelayReferenceReferentVisitor {
+ public:
+ explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {
+ }
+
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ collector_->DelayReferenceReferent(klass, ref);
+ }
+
+ private:
+ MarkSweep* const collector_;
+};
+
template <bool kUseFinger = false>
class MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
- const Object** mark_stack)
+ Object** mark_stack)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
mark_stack_pos_(mark_stack_size) {
@@ -638,27 +571,44 @@ class MarkStackTask : public Task {
static const size_t kMaxSize = 1 * KB;
protected:
+ class MarkObjectParallelVisitor {
+ public:
+ explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
+ MarkSweep* mark_sweep) ALWAYS_INLINE
+ : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
+
+ void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
+ if (kUseFinger) {
+ android_memory_barrier();
+ if (reinterpret_cast<uintptr_t>(ref) >=
+ static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
+ return;
+ }
+ }
+ chunk_task_->MarkStackPush(ref);
+ }
+ }
+
+ private:
+ MarkStackTask<kUseFinger>* const chunk_task_;
+ MarkSweep* const mark_sweep_;
+ };
+
class ScanObjectParallelVisitor {
public:
explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task) ALWAYS_INLINE
: chunk_task_(chunk_task) {}
- void operator()(Object* obj) const {
- MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
- mark_sweep->ScanObjectVisit(obj,
- [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) ALWAYS_INLINE_LAMBDA {
- if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
- if (kUseFinger) {
- android_memory_barrier();
- if (reinterpret_cast<uintptr_t>(ref) >=
- static_cast<uintptr_t>(mark_sweep->atomic_finger_)) {
- return;
- }
- }
- chunk_task_->MarkStackPush(ref);
- }
- });
+ // No thread safety analysis since multiple threads will use this visitor.
+ void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
+ MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
+ DelayReferenceReferentVisitor ref_visitor(mark_sweep);
+ mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
private:
@@ -676,11 +626,11 @@ class MarkStackTask : public Task {
MarkSweep* const mark_sweep_;
ThreadPool* const thread_pool_;
// Thread local mark stack for this task.
- const Object* mark_stack_[kMaxSize];
+ Object* mark_stack_[kMaxSize];
// Mark stack position.
size_t mark_stack_pos_;
- void MarkStackPush(const Object* obj) ALWAYS_INLINE {
+ void MarkStackPush(Object* obj) ALWAYS_INLINE {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -689,7 +639,7 @@ class MarkStackTask : public Task {
thread_pool_->AddTask(Thread::Current(), task);
}
DCHECK(obj != nullptr);
- DCHECK(mark_stack_pos_ < kMaxSize);
+ DCHECK_LT(mark_stack_pos_, kMaxSize);
mark_stack_[mark_stack_pos_++] = obj;
}
@@ -698,16 +648,17 @@ class MarkStackTask : public Task {
}
// Scans all of the objects
- virtual void Run(Thread* self) {
+ virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- const Object* obj = nullptr;
+ Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- const Object* obj = mark_stack_[--mark_stack_pos_];
+ Object* obj = mark_stack_[--mark_stack_pos_];
DCHECK(obj != nullptr);
__builtin_prefetch(obj);
prefetch_fifo.push_back(obj);
@@ -724,7 +675,7 @@ class MarkStackTask : public Task {
obj = mark_stack_[--mark_stack_pos_];
}
DCHECK(obj != nullptr);
- visitor(const_cast<mirror::Object*>(obj));
+ visitor(obj);
}
}
};
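Run() drains the local stack through a small FIFO so that __builtin_prefetch (a GCC/Clang builtin, as in the hunk above) can warm the cache a few objects ahead of ScanObject. The same pattern, compressed into a standalone helper with std::deque in place of BoundedFifoPowerOfTwo:

#include <cstddef>
#include <deque>
#include <vector>

// Drains `stack` while prefetching a few entries ahead of the per-object work.
// `process` is whatever the caller runs per object (ScanObject in the GC).
template <typename T, typename Process>
void DrainWithPrefetch(std::vector<T*>* stack, const Process& process,
                       size_t fifo_size = 4) {
  std::deque<T*> fifo;
  for (;;) {
    // Top up the FIFO, prefetching objects that will be processed shortly.
    while (!stack->empty() && fifo.size() < fifo_size) {
      T* obj = stack->back();
      stack->pop_back();
      __builtin_prefetch(obj);
      fifo.push_back(obj);
    }
    if (fifo.empty()) {
      break;
    }
    T* obj = fifo.front();
    fifo.pop_front();
    process(obj);  // by now the prefetch has had time to pull *obj into cache
  }
}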
@@ -733,7 +684,7 @@ class CardScanTask : public MarkStackTask<false> {
public:
CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::SpaceBitmap* bitmap,
byte* begin, byte* end, byte minimum_age, size_t mark_stack_size,
- const Object** mark_stack_obj)
+ Object** mark_stack_obj)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
begin_(begin),
@@ -784,8 +735,8 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
// scanned at the same time.
timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
// Try to take some of the mark stack since we can pass this off to the worker tasks.
- const Object** mark_stack_begin = const_cast<const Object**>(mark_stack_->Begin());
- const Object** mark_stack_end = const_cast<const Object**>(mark_stack_->End());
+ Object** mark_stack_begin = mark_stack_->Begin();
+ Object** mark_stack_end = mark_stack_->End();
const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
// Estimated number of work tasks we will create.
const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
@@ -818,7 +769,7 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
mark_stack_end -= mark_stack_increment;
mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
- DCHECK_EQ(mark_stack_end, const_cast<const art::mirror::Object **>(mark_stack_->End()));
+ DCHECK_EQ(mark_stack_end, mark_stack_->End());
// Add the new task to the thread pool.
auto* task = new CardScanTask(thread_pool, this, space->GetMarkBitmap(), card_begin,
card_begin + card_increment, minimum_age,
@@ -907,8 +858,8 @@ void MarkSweep::RecursiveMark() {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
(!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
- current_mark_bitmap_ = space->GetMarkBitmap();
- if (current_mark_bitmap_ == nullptr) {
+ current_space_bitmap_ = space->GetMarkBitmap();
+ if (current_space_bitmap_ == nullptr) {
continue;
}
if (parallel) {
@@ -927,7 +878,7 @@ void MarkSweep::RecursiveMark() {
delta = RoundUp(delta, KB);
if (delta < 16 * KB) delta = end - begin;
begin += delta;
- auto* task = new RecursiveMarkTask(thread_pool, this, current_mark_bitmap_, start,
+ auto* task = new RecursiveMarkTask(thread_pool, this, current_space_bitmap_, start,
begin);
thread_pool->AddTask(self, task);
}
@@ -939,7 +890,7 @@ void MarkSweep::RecursiveMark() {
// This function does not handle heap end increasing, so we must use the space end.
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
+ current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
}
}
}
@@ -974,10 +925,10 @@ void MarkSweep::ReMarkRoots() {
}
}
-void MarkSweep::SweepSystemWeaks() {
- Runtime* runtime = Runtime::Current();
+void MarkSweep::SweepSystemWeaks(Thread* self) {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
timings_.StartSplit("SweepSystemWeaks");
- runtime->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
timings_.EndSplit();
}
@@ -988,14 +939,13 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
}
void MarkSweep::VerifyIsLive(const Object* obj) {
- Heap* heap = GetHeap();
- if (!heap->GetLiveBitmap()->Test(obj)) {
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ if (!heap_->GetLiveBitmap()->Test(obj)) {
+ space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
if (!large_object_space->GetLiveObjects()->Test(obj)) {
- if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
- heap->allocation_stack_->End()) {
+ if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+ heap_->allocation_stack_->End()) {
// Object not found!
- heap->DumpSpaces();
+ heap_->DumpSpaces();
LOG(FATAL) << "Found dead object " << obj;
}
}
@@ -1009,9 +959,14 @@ void MarkSweep::VerifySystemWeaks() {
class CheckpointMarkThreadRoots : public Closure {
public:
- explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
+ explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
+ bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
+ : mark_sweep_(mark_sweep),
+ revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
+ revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
+ }
- virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
+ virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
ATRACE_BEGIN("Marking thread roots");
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
@@ -1019,18 +974,22 @@ class CheckpointMarkThreadRoots : public Closure {
<< thread->GetState() << " thread " << thread << " self " << self;
thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
ATRACE_END();
- if (kUseThreadLocalAllocationStack) {
- thread->RevokeThreadLocalAllocationStack();
+ if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
+ ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers");
+ mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
+ ATRACE_END();
}
mark_sweep_->GetBarrier().Pass(self);
}
private:
- MarkSweep* mark_sweep_;
+ MarkSweep* const mark_sweep_;
+ const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};
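Each checkpointed thread visits its own roots and then calls Pass() on a barrier that the GC thread blocks on. A minimal counting barrier with that Pass()/Increment() shape, built on std::condition_variable rather than ART's Barrier class:

#include <condition_variable>
#include <mutex>

// Minimal counting barrier (illustration only, not art::Barrier).
class CountBarrier {
 public:
  // Called by each checkpointed thread once its work is done.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    if (--pending_ <= 0) {
      cv_.notify_all();
    }
  }
  // Called by the waiting thread; blocks until `count` threads have passed.
  void Increment(int count) {
    std::unique_lock<std::mutex> lock(mu_);
    pending_ += count;
    cv_.wait(lock, [this] { return pending_ <= 0; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int pending_ = 0;
};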
-void MarkSweep::MarkRootsCheckpoint(Thread* self) {
- CheckpointMarkThreadRoots check_point(this);
+void MarkSweep::MarkRootsCheckpoint(Thread* self,
+ bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
+ CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
timings_.StartSplit("MarkRootsCheckpoint");
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Request the check point is run on all threads returning a count of the threads that must
@@ -1040,10 +999,10 @@ void MarkSweep::MarkRootsCheckpoint(Thread* self) {
// TODO: optimize to not release locks when there are no threads to wait for.
Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
Locks::mutator_lock_->SharedUnlock(self);
- ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
- CHECK_EQ(old_state, kWaitingPerformingGc);
- gc_barrier_->Increment(self, barrier_count);
- self->SetState(kWaitingPerformingGc);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
Locks::mutator_lock_->SharedLock(self);
Locks::heap_bitmap_lock_->ExclusiveLock(self);
timings_.EndSplit();
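The hunk above also swaps the explicit SetState()/CHECK_EQ pair for a scoped state change, so the previous thread state is restored even if the barrier wait exits early. The RAII shape looks roughly like this, with a hypothetical ThreadLike type standing in for art::Thread:

// Hypothetical stand-in for art::Thread (illustration only).
enum class State { kRunnable, kWaitingPerformingGc, kWaitingForCheckPointsToRun };

struct ThreadLike {
  State state = State::kRunnable;
  State SetState(State new_state) {
    State old = state;
    state = new_state;
    return old;
  }
};

// Restores the previous state on scope exit -- the same shape as the
// ScopedThreadStateChange used above in place of manual SetState() calls.
class ScopedStateChange {
 public:
  ScopedStateChange(ThreadLike* self, State new_state)
      : self_(self), old_state_(self->SetState(new_state)) {}
  ~ScopedStateChange() { self_->SetState(old_state_); }

 private:
  ThreadLike* const self_;
  const State old_state_;
};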
@@ -1059,7 +1018,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
size_t freed_objects = 0;
size_t freed_large_objects = 0;
// How many objects are left in the array, modified after each space is swept.
- Object** objects = const_cast<Object**>(allocations->Begin());
+ Object** objects = allocations->Begin();
size_t count = allocations->Size();
// Change the order to ensure that the non-moving space last swept as an optimization.
std::vector<space::ContinuousSpace*> sweep_spaces;
@@ -1157,6 +1116,16 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
void MarkSweep::Sweep(bool swap_bitmaps) {
+ // Ensure that nobody inserted items in the live stack after we swapped the stacks.
+ CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
+ // Mark everything allocated since the last GC as live so that we can sweep concurrently,
+ // knowing that new allocations won't be marked as live.
+ timings_.StartSplit("MarkStackAsLive");
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ timings_.EndSplit();
+
DCHECK(mark_stack_->IsEmpty());
TimingLogger::ScopedSplit("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -1185,27 +1154,29 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
-// Process the "referent" field in a java.lang.ref.Reference. If the
-// referent has not yet been marked, put it on the appropriate list in
-// the heap for later processing.
-void MarkSweep::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref) {
DCHECK(klass != nullptr);
- heap_->DelayReferenceReferent(klass, obj->AsReference(), IsMarkedCallback, this);
+ if (kCountJavaLangRefs) {
+ ++reference_count_;
+ }
+ heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
}
class MarkObjectVisitor {
public:
- explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {}
+ explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
+ }
- // TODO: Fixme when anotatalysis works with visitors.
- void operator()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const ALWAYS_INLINE
- NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->MarkObject(ref);
+ mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
}
private:
@@ -1215,12 +1186,12 @@ class MarkObjectVisitor {
// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(Object* obj) {
- MarkObjectVisitor visitor(this);
- ScanObjectVisit(obj, visitor);
+ MarkObjectVisitor mark_visitor(this);
+ DelayReferenceReferentVisitor ref_visitor(this);
+ ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
- DCHECK(arg != nullptr);
reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
}
@@ -1233,8 +1204,7 @@ void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
// Split the current mark stack up into work tasks.
for (mirror::Object **it = mark_stack_->Begin(), **end = mark_stack_->End(); it < end; ) {
const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
- thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta,
- const_cast<const mirror::Object**>(it)));
+ thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
it += delta;
}
thread_pool->SetMaxActiveWorkers(thread_count - 1);
@@ -1276,7 +1246,7 @@ void MarkSweep::ProcessMarkStack(bool paused) {
}
obj = mark_stack_->PopBack();
}
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
ScanObject(obj);
}
}
@@ -1288,70 +1258,54 @@ inline bool MarkSweep::IsMarked(const Object* object) const
if (immune_region_.ContainsObject(object)) {
return true;
}
- DCHECK(current_mark_bitmap_ != NULL);
- if (current_mark_bitmap_->HasAddress(object)) {
- return current_mark_bitmap_->Test(object);
+ if (current_space_bitmap_->HasAddress(object)) {
+ return current_space_bitmap_->Test(object);
}
- return heap_->GetMarkBitmap()->Test(object);
+ return mark_bitmap_->Test(object);
}
void MarkSweep::FinishPhase() {
TimingLogger::ScopedSplit split("FinishPhase", &timings_);
// Can't enqueue references if we hold the mutator lock.
- Heap* heap = GetHeap();
timings_.NewSplit("PostGcVerification");
- heap->PostGcVerification(this);
-
- // Update the cumulative statistics
- total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
- total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
-
- // Ensure that the mark stack is empty.
- CHECK(mark_stack_->IsEmpty());
-
+ heap_->PostGcVerification(this);
if (kCountScannedTypes) {
VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
<< " other=" << other_count_;
}
-
if (kCountTasks) {
VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
}
-
if (kMeasureOverhead) {
VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
}
-
if (kProfileLargeObjects) {
VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
}
-
- if (kCountClassesMarked) {
- VLOG(gc) << "Classes marked " << classes_marked_;
- }
-
if (kCountJavaLangRefs) {
VLOG(gc) << "References scanned " << reference_count_;
}
-
- // Update the cumulative loggers.
- cumulative_timings_.Start();
- cumulative_timings_.AddLogger(timings_);
- cumulative_timings_.End();
-
- // Clear all of the spaces' mark bitmaps.
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
- if (bitmap != nullptr &&
- space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
- bitmap->Clear();
- }
+ if (kCountMarkedObjects) {
+ VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
+ << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
}
+ CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
mark_stack_->Reset();
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
+}
- // Reset the marked large objects.
- space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
- large_objects->GetMarkObjects()->Clear();
+void MarkSweep::RevokeAllThreadLocalBuffers() {
+ if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
+ // If concurrent, rosalloc thread-local buffers are revoked at the
+ // thread checkpoint. Bump pointer space thread-local buffers must
+ // not be in use.
+ GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
+ } else {
+ timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetHeap()->RevokeAllThreadLocalBuffers();
+ timings_.EndSplit();
+ }
}
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index df19f88..f1fd546 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -32,32 +32,22 @@ namespace art {
namespace mirror {
class Class;
class Object;
- template<class T> class ObjectArray;
+ class Reference;
} // namespace mirror
-class StackVisitor;
class Thread;
enum VisitRootFlags : uint8_t;
namespace gc {
+class Heap;
+
namespace accounting {
- template <typename T> class AtomicStack;
- class MarkIfReachesAllocspaceVisitor;
- class ModUnionClearCardVisitor;
- class ModUnionVisitor;
- class ModUnionTableBitmap;
- class MarkStackChunk;
+ template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
} // namespace accounting
-namespace space {
- class ContinuousSpace;
-} // namespace space
-
-class Heap;
-
namespace collector {
class MarkSweep : public GarbageCollector {
@@ -68,24 +58,30 @@ class MarkSweep : public GarbageCollector {
virtual void InitializePhase() OVERRIDE;
virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void PausePhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void MarkReachableObjects()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- virtual bool IsConcurrent() const OVERRIDE;
+ bool IsConcurrent() const {
+ return is_concurrent_;
+ }
virtual GcType GetGcType() const OVERRIDE {
return kGcTypeFull;
}
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return is_concurrent_ ? kCollectorTypeCMS : kCollectorTypeMS;
+ }
+
// Initializes internal structures.
void Init();
// Find the default mark bitmap.
- void FindDefaultMarkBitmap();
+ void FindDefaultSpaceBitmap();
// Marks all objects in the root set at the start of a garbage collection.
void MarkRoots(Thread* self)
@@ -100,7 +96,7 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void MarkRootsCheckpoint(Thread* self)
+ void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -126,8 +122,12 @@ class MarkSweep : public GarbageCollector {
void ProcessReferences(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep.
- virtual void UpdateAndMarkModUnion()
+ void PreProcessReferences()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Update and mark references from immune spaces.
+ void UpdateAndMarkModUnion()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
@@ -152,13 +152,15 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // TODO: enable thread safety analysis when in use by multiple worker threads.
- template <typename MarkVisitor>
- void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS;
+ // No thread safety analysis due to lambdas.
+ template<typename MarkVisitor, typename ReferenceVisitor>
+ void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
+ const ReferenceVisitor& ref_visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void SweepSystemWeaks()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ void SweepSystemWeaks(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -170,15 +172,14 @@ class MarkSweep : public GarbageCollector {
void VerifyIsLive(const mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- template <typename Visitor>
- static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor, bool visit_class)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_);
-
static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t thread_id,
RootType root_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -197,7 +198,7 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Marks an object.
- void MarkObject(const mirror::Object* obj)
+ void MarkObject(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -205,6 +206,10 @@ class MarkSweep : public GarbageCollector {
return *gc_barrier_;
}
+ // Schedules an unmarked object for reference processing.
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
protected:
// Returns true if the object has its bit set in the mark bitmap.
bool IsMarked(const mirror::Object* object) const;
@@ -213,31 +218,19 @@ class MarkSweep : public GarbageCollector {
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void MarkObjectNonNull(const mirror::Object* obj)
+ void MarkObjectNonNull(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Unmarks an object by clearing the bit inside of the corresponding bitmap, or if it is in a
- // space set, removing the object from the set.
- void UnMarkObjectNonNull(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // Mark the vm thread roots.
- void MarkThreadRoots(Thread* self)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Marks an object atomically, safe to use from multiple threads.
- void MarkObjectNonNullParallel(const mirror::Object* obj);
+ void MarkObjectNonNullParallel(mirror::Object* obj);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj, bool set)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -261,49 +254,14 @@ class MarkSweep : public GarbageCollector {
void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
NO_THREAD_SAFETY_ANALYSIS;
- template <typename Visitor>
- static void VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visit the header, static field references, and interface pointers of a class object.
- template <typename Visitor>
- static void VisitClassReferences(mirror::Class* klass, mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- template <typename Visitor>
- static void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- template <typename Visitor>
- static void VisitFieldsReferences(mirror::Object* obj, uint32_t ref_offsets, bool is_static,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visit all of the references in an object array.
- template <typename Visitor>
- static void VisitObjectArrayReferences(mirror::ObjectArray<mirror::Object>* array,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visits the header and field references of a data object.
- template <typename Visitor>
- static void VisitOtherReferences(mirror::Class* klass, mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- return VisitInstanceFieldsReferences(klass, obj, visitor);
- }
+ // Push a single reference onto the mark stack.

+ void PushOnMarkStack(mirror::Object* obj);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, byte minimum_age)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Recursively blackens objects on the mark stack.
void ProcessMarkStack(bool paused)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -313,31 +271,25 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void EnqueueFinalizerReferences(mirror::Object** ref)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void PreserveSomeSoftReferences(mirror::Object** ref)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void ClearWhiteReferences(mirror::Object** list)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
+ // Revoke all the thread-local buffers.
+ void RevokeAllThreadLocalBuffers();
+
// Whether or not we count how many of each type of object were scanned.
static const bool kCountScannedTypes = false;
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
- accounting::SpaceBitmap* current_mark_bitmap_;
+ accounting::SpaceBitmap* current_space_bitmap_;
+ // Cache the heap's mark bitmap to prevent having to do 2 loads during slow path marking.
+ accounting::HeapBitmap* mark_bitmap_;
accounting::ObjectStack* mark_stack_;
- // Immune range, every object inside the immune range is assumed to be marked.
+ // Immune region, every object inside the immune range is assumed to be marked.
ImmuneRegion immune_region_;
// Parallel finger.
@@ -350,11 +302,14 @@ class MarkSweep : public GarbageCollector {
AtomicInteger other_count_;
AtomicInteger large_object_test_;
AtomicInteger large_object_mark_;
- AtomicInteger classes_marked_;
AtomicInteger overhead_time_;
AtomicInteger work_chunks_created_;
AtomicInteger work_chunks_deleted_;
AtomicInteger reference_count_;
+ AtomicInteger mark_null_count_;
+ AtomicInteger mark_immune_count_;
+ AtomicInteger mark_fastpath_count_;
+ AtomicInteger mark_slowpath_count_;
// Verification.
size_t live_stack_freeze_size_;
@@ -373,6 +328,7 @@ class MarkSweep : public GarbageCollector {
friend class art::gc::Heap;
friend class InternTableEntryIsUnmarked;
friend class MarkIfReachesAllocspaceVisitor;
+ friend class MarkObjectVisitor;
friend class ModUnionCheckReferences;
friend class ModUnionClearCardVisitor;
friend class ModUnionReferenceVisitor;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 3b8f7c3..d60298b 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -17,6 +17,11 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+#include "semi_space.h"
+
+#include "gc/accounting/heap_bitmap.h"
+#include "mirror/object-inl.h"
+
namespace art {
namespace gc {
namespace collector {
@@ -30,6 +35,60 @@ inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object
return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
}
+// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
+// the to-space and have their forwarding address updated. Newly marked objects are pushed onto
+// the mark stack.
+template<bool kPoisonReferences>
+inline void SemiSpace::MarkObject(
+ mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) {
+ mirror::Object* obj = obj_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return;
+ }
+ if (kUseBrooksPointer) {
+ // Verify all the objects have the correct forward pointer installed.
+ obj->AssertSelfBrooksPointer();
+ }
+ if (!immune_region_.ContainsObject(obj)) {
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (forward_address == nullptr) {
+ forward_address = MarkNonForwardedObject(obj);
+ DCHECK(forward_address != nullptr);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't get stomped over.
+ obj->SetLockWord(LockWord::FromForwardingAddress(
+ reinterpret_cast<size_t>(forward_address)));
+ // Push the object onto the mark stack for later processing.
+ MarkStackPush(forward_address);
+ }
+ obj_ptr->Assign(forward_address);
+ } else {
+ accounting::SpaceBitmap* object_bitmap =
+ heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+ if (LIKELY(object_bitmap != nullptr)) {
+ if (generational_) {
+ // In a bump pointer space only collection we should not reach here,
+ // since we don't mark objects in the non-moving space (except for
+ // promoted objects). Note that the non-moving space is added to the
+ // immune region.
+ DCHECK(whole_heap_collection_);
+ }
+ if (!object_bitmap->Set(obj)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
+ }
+ } else {
+ CHECK(!to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
+ if (MarkLargeObject(obj)) {
+ MarkStackPush(obj);
+ }
+ }
+ }
+ }
+}
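MarkObject relies on the lock word doubling as the forwarding slot: the object is copied first, and only then is the forwarding address installed, so the copy keeps an intact header. A toy version of that ordering (a std::deque stands in for the to-space so earlier copies keep stable addresses; this is not ART's LockWord API):

#include <cstdint>
#include <deque>

// Toy object whose single header word doubles as the forwarding slot.
struct ToyObject {
  uintptr_t header = 0;       // 0 = not forwarded, otherwise the new address
  char payload[24] = {};
};

// Copies obj into the to-space, then publishes the forwarding address.
ToyObject* ForwardObject(ToyObject* obj, std::deque<ToyObject>* to_space) {
  if (obj->header != 0) {
    // Already moved: return the previously installed forwarding address.
    return reinterpret_cast<ToyObject*>(obj->header);
  }
  to_space->push_back(*obj);  // copy the whole object first...
  ToyObject* forward = &to_space->back();
  // ...then overwrite the old header, so the copy's header is not clobbered.
  obj->header = reinterpret_cast<uintptr_t>(forward);
  return forward;
}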
+
} // namespace collector
} // namespace gc
} // namespace art
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 5b9c397..222bd63 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "semi_space.h"
+#include "semi_space-inl.h"
#include <functional>
#include <numeric>
@@ -25,7 +25,7 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
-#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -50,7 +50,7 @@
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
-#include "semi_space-inl.h"
+#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"
@@ -97,18 +97,13 @@ void SemiSpace::BindBitmaps() {
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
- mark_stack_(nullptr),
- is_large_object_space_immune_(false),
to_space_(nullptr),
- to_space_live_bitmap_(nullptr),
from_space_(nullptr),
- self_(nullptr),
generational_(generational),
last_gc_to_space_end_(nullptr),
bytes_promoted_(0),
whole_heap_collection_(true),
- whole_heap_collection_interval_counter_(0),
- saved_bytes_(0) {
+ whole_heap_collection_interval_counter_(0) {
}
void SemiSpace::InitializePhase() {
@@ -214,7 +209,7 @@ void SemiSpace::UpdateAndMarkModUnion() {
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
&timings_);
- table->UpdateAndMarkReferences(MarkObjectCallback, this);
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
} else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
DCHECK(kUseRememberedSet);
// If a bump pointer space only collection, the non-moving
@@ -246,7 +241,8 @@ void SemiSpace::UpdateAndMarkModUnion() {
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
// exclusive lock on the mutator lock, but
// SpaceBitmap::VisitMarkedRange() only requires the shared lock.
@@ -263,22 +259,22 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
from_space_(from_space) {}
- void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */)
- const ALWAYS_INLINE {
+ void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
if (from_space_->HasAddress(ref)) {
Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
+ LOG(FATAL) << ref << " found in from space";
}
- DCHECK(!from_space_->HasAddress(ref));
}
private:
space::ContinuousMemMapAllocSpace* from_space_;
};
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
- DCHECK(obj != NULL);
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
- MarkSweep::VisitObjectReferences(obj, visitor, kMovingClasses);
+ obj->VisitReferences<kMovingClasses>(visitor);
}
class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
@@ -313,7 +309,7 @@ void SemiSpace::MarkReachableObjects() {
accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
if (kUseRememberedSet) {
DCHECK(rem_set != nullptr);
- rem_set->UpdateAndMarkReferences(MarkObjectCallback, from_space_, this);
+ rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
if (kIsDebugBuild) {
// Verify that there are no from-space references that
// remain in the space, that is, the remembered set (and the
@@ -475,9 +471,9 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
memcpy(dest, src, page_remain);
byte_src += page_remain;
byte_dest += page_remain;
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
- CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
+ DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
+ DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
+ DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
while (byte_src + kPageSize < limit) {
bool all_zero = true;
uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
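This hunk sits inside CopyAvoidingDirtyingPages, which skips writing destination pages whose source bytes are all zero, on the assumption that the to-space came from a fresh (zero-filled) anonymous mmap. A simplified byte-at-a-time version of the idea (the real routine works a word at a time and handles alignment, which is what the DCHECK_ALIGNEDs assert):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Copies `size` bytes from src to dest but skips page-sized runs that are all
// zero, assuming dest came from a fresh (zero-filled) anonymous mmap. Returns
// the number of bytes actually written. Byte-at-a-time for clarity only.
size_t CopySkippingZeroPages(void* dest, const void* src, size_t size,
                             size_t page_size = 4096) {
  const uint8_t* s = static_cast<const uint8_t*>(src);
  uint8_t* d = static_cast<uint8_t*>(dest);
  size_t bytes_written = 0;
  size_t offset = 0;
  for (; offset + page_size <= size; offset += page_size) {
    bool all_zero = true;
    for (size_t i = 0; i < page_size; ++i) {
      if (s[offset + i] != 0) {
        all_zero = false;
        break;
      }
    }
    if (!all_zero) {  // only touch (and dirty) the destination page when needed
      std::memcpy(d + offset, s + offset, page_size);
      bytes_written += page_size;
    }
  }
  std::memcpy(d + offset, s + offset, size - offset);  // copy the tail remainder
  return bytes_written + (size - offset);
}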
@@ -579,74 +575,28 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
return forward_address;
}
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
-// the to-space and have their forward address updated. Objects which have been newly marked are
-// pushed on the mark stack.
-Object* SemiSpace::MarkObject(Object* obj) {
- if (kUseBrooksPointer) {
- // Verify all the objects have the correct forward pointer installed.
- if (obj != nullptr) {
- obj->AssertSelfBrooksPointer();
- }
- }
- Object* forward_address = obj;
- if (obj != nullptr && !immune_region_.ContainsObject(obj)) {
- if (from_space_->HasAddress(obj)) {
- forward_address = GetForwardingAddressInFromSpace(obj);
- // If the object has already been moved, return the new forward address.
- if (forward_address == nullptr) {
- forward_address = MarkNonForwardedObject(obj);
- DCHECK(forward_address != nullptr);
- // Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't get stomped over.
- obj->SetLockWord(LockWord::FromForwardingAddress(
- reinterpret_cast<size_t>(forward_address)));
- // Push the object onto the mark stack for later processing.
- MarkStackPush(forward_address);
- }
- // TODO: Do we need this if in the else statement?
- } else {
- accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
- if (LIKELY(object_bitmap != nullptr)) {
- if (generational_) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(whole_heap_collection_);
- }
- // This object was not previously marked.
- if (!object_bitmap->Test(obj)) {
- object_bitmap->Set(obj);
- MarkStackPush(obj);
- }
- } else {
- CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
- if (MarkLargeObject(obj)) {
- MarkStackPush(obj);
- }
- }
- }
- }
- return forward_address;
-}
-
void SemiSpace::ProcessMarkStackCallback(void* arg) {
- DCHECK(arg != nullptr);
reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}
mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
- DCHECK(root != nullptr);
- DCHECK(arg != nullptr);
- return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
+ auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
+ reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
+ return ref.AsMirrorPtr();
+}
+
+void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
+ void* arg) {
+ reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}
void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
- DCHECK(root != nullptr);
- DCHECK(arg != nullptr);
- *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
+ auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
+ reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
+ if (*root != ref.AsMirrorPtr()) {
+ *root = ref.AsMirrorPtr();
+ }
}
// Marks all objects in the root set.
@@ -708,42 +658,36 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
- heap_->DelayReferenceReferent(klass, obj->AsReference(), MarkedForwardingAddressCallback, this);
+void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+ heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}
class SemiSpaceMarkObjectVisitor {
public:
- explicit SemiSpaceMarkObjectVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
- }
-
- void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */)
- const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) */ {
- mirror::Object* new_address = semi_space_->MarkObject(ref);
- if (new_address != ref) {
- DCHECK(new_address != nullptr);
- // Don't need to mark the card since we updating the object address and not changing the
- // actual objects its pointing to. Using SetFieldObjectWithoutWriteBarrier is better in this
- // case since it does not dirty cards and use additional memory.
- // Since we do not change the actual object, we can safely use non-transactional mode. Also
- // disable check as we could run inside a transaction.
- obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
- }
+ explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
}
+
+ void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ // Object was already verified when we scanned it.
+ collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
+ }
+
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ collector_->DelayReferenceReferent(klass, ref);
+ }
+
private:
- SemiSpace* const semi_space_;
+ SemiSpace* const collector_;
};
// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
- DCHECK(obj != NULL);
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
SemiSpaceMarkObjectVisitor visitor(this);
- MarkSweep::VisitObjectReferences(obj, visitor, kMovingClasses);
- mirror::Class* klass = obj->GetClass<kVerifyNone>();
- if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
- DelayReferenceReferent(klass, obj);
- }
+ obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
// Scan anything that's on the mark stack.
@@ -782,8 +726,8 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
return obj;
}
if (from_space_->HasAddress(obj)) {
- mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
- return forwarding_address; // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or nullptr.
+ return GetForwardingAddressInFromSpace(obj);
} else if (to_space_->HasAddress(obj)) {
// Should be unlikely.
// Already forwarded, must be marked.
@@ -807,38 +751,12 @@ void SemiSpace::FinishPhase() {
Heap* heap = GetHeap();
timings_.NewSplit("PostGcVerification");
heap->PostGcVerification(this);
-
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
to_space_ = nullptr;
from_space_ = nullptr;
-
- // Update the cumulative statistics
- total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
- total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
-
- // Ensure that the mark stack is empty.
CHECK(mark_stack_->IsEmpty());
-
- // Update the cumulative loggers.
- cumulative_timings_.Start();
- cumulative_timings_.AddLogger(timings_);
- cumulative_timings_.End();
-
- // Clear all of the spaces' mark bitmaps.
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
- if (bitmap != nullptr &&
- space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
- bitmap->Clear();
- }
- }
mark_stack_->Reset();
-
- // Reset the marked large objects.
- space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
- large_objects->GetMarkObjects()->Clear();
-
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
@@ -856,6 +774,15 @@ void SemiSpace::FinishPhase() {
whole_heap_collection_ = false;
}
}
+ // Clear all of the spaces' mark bitmaps.
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
+}
+
+void SemiSpace::RevokeAllThreadLocalBuffers() {
+ timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetHeap()->RevokeAllThreadLocalBuffers();
+ timings_.EndSplit();
}
} // namespace collector
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 08bfbc4..f067cb2 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -28,37 +28,28 @@
namespace art {
+class Thread;
+
namespace mirror {
class Class;
class Object;
- template<class T> class ObjectArray;
} // namespace mirror
-class StackVisitor;
-class Thread;
-
namespace gc {
+class Heap;
+
namespace accounting {
template <typename T> class AtomicStack;
- class MarkIfReachesAllocspaceVisitor;
- class ModUnionClearCardVisitor;
- class ModUnionVisitor;
- class ModUnionTableBitmap;
- class MarkStackChunk;
typedef AtomicStack<mirror::Object*> ObjectStack;
class SpaceBitmap;
} // namespace accounting
namespace space {
- class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
- class MallocSpace;
} // namespace space
-class Heap;
-
namespace collector {
class SemiSpace : public GarbageCollector {
@@ -66,23 +57,24 @@ class SemiSpace : public GarbageCollector {
// If true, use remembered sets in the generational mode.
static constexpr bool kUseRememberedSet = true;
- explicit SemiSpace(Heap* heap, bool generational = false,
- const std::string& name_prefix = "");
+ explicit SemiSpace(Heap* heap, bool generational = false, const std::string& name_prefix = "");
~SemiSpace() {}
- virtual void InitializePhase();
- virtual bool IsConcurrent() const {
- return false;
- }
- virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual void MarkReachableObjects()
+ virtual void InitializePhase() OVERRIDE;
+ virtual void MarkingPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ virtual void ReclaimPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ virtual void FinishPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void MarkReachableObjects()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual GcType GetGcType() const {
+ virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
}
+ virtual CollectorType GetCollectorType() const OVERRIDE {
+ return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
+ }
// Sets which space we will be copying objects to.
void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
@@ -97,11 +89,14 @@ class SemiSpace : public GarbageCollector {
void FindDefaultMarkBitmap();
// Returns the new address of the object.
- mirror::Object* MarkObject(mirror::Object* object)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ template<bool kPoisonReferences>
+ void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ScanObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VerifyNoFromSpaceReferences(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -112,12 +107,13 @@ class SemiSpace : public GarbageCollector {
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
void UnBindBitmaps()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void ProcessReferences(Thread* self)
+ void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
@@ -126,22 +122,9 @@ class SemiSpace : public GarbageCollector {
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Sweep only pointers within an array. WARNING: Trashes objects.
- void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // TODO: enable thread safety analysis when in use by multiple worker threads.
- template <typename MarkVisitor>
- void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS;
-
void SweepSystemWeaks()
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- template <typename Visitor>
- static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
RootType /*root_type*/)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -149,24 +132,36 @@ class SemiSpace : public GarbageCollector {
static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
static void ProcessMarkStackCallback(void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Schedules an unmarked object for reference processing.
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
void ResizeMarkStack(size_t new_size);
@@ -174,78 +169,22 @@ class SemiSpace : public GarbageCollector {
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
- static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
- const StackVisitor *visitor);
-
- void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
- NO_THREAD_SAFETY_ANALYSIS;
-
- template <typename Visitor>
- static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visit the header, static field references, and interface pointers of a class object.
- template <typename Visitor>
- static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- template <typename Visitor>
- static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- template <typename Visitor>
- static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visit all of the references in an object array.
- template <typename Visitor>
- static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- // Visits the header and field references of a data object.
- template <typename Visitor>
- static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
- const Visitor& visitor)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- return VisitInstanceFieldsReferences(klass, obj, visitor);
- }
-
// Push an object onto the mark stack.
- inline void MarkStackPush(mirror::Object* obj);
+ void MarkStackPush(mirror::Object* obj);
void UpdateAndMarkModUnion()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Schedules an unmarked object for reference processing.
- void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- void EnqueueFinalizerReferences(mirror::Object** ref)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- void PreserveSomeSoftReferences(mirror::Object** ref)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- void ClearWhiteReferences(mirror::Object** list)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
- mirror::Object** weak_references,
- mirror::Object** finalizer_references,
- mirror::Object** phantom_references)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+ // Revoke all the thread-local buffers.
+ void RevokeAllThreadLocalBuffers();
+
// Current space, we check this space first to avoid searching for the appropriate space for an
// object.
accounting::ObjectStack* mark_stack_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 934b1bd..4f9dabf 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -46,10 +46,6 @@ class StickyMarkSweep FINAL : public PartialMarkSweep {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- // Don't need to do anything special here since we scan all the cards which may have references
- // to the newly allocated objects.
- void UpdateAndMarkModUnion() OVERRIDE { }
-
private:
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 98c27fb..c0a6b6a 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -36,6 +36,8 @@ enum CollectorType {
kCollectorTypeGSS,
// Heap trimming collector, doesn't do any actual collecting.
kCollectorTypeHeapTrim,
+ // A (mostly) concurrent copying collector.
+ kCollectorTypeCC,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6cc44c9..8bfe793 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -118,11 +118,11 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
} else {
DCHECK(!Dbg::IsAllocTrackingEnabled());
}
- // concurrent_gc_ isn't known at compile time so we can optimize by not checking it for
+ // IsConcurrentGc() isn't known at compile time so we can optimize by not checking it for
// the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
// the allocator_type should be constant propagated.
- if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
+ if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
@@ -276,7 +276,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
}
- if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
+ if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
if (!kGrow) {
return true;
}
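The comment above depends on the allocator predicate folding to a compile-time constant. Here is a minimal sketch of that idea, using hypothetical names rather than ART's real declarations: when the predicate is constant false for an allocator, the whole concurrent-GC check is eliminated even though the second operand is only known at run time.

    // Hypothetical simplification, not ART's real declarations.
    enum AllocatorType { kAllocatorTypeBumpPointer, kAllocatorTypeTLAB, kAllocatorTypeRosAlloc };

    constexpr bool AllocatorMayHaveConcurrentGC(AllocatorType type) {
      return type != kAllocatorTypeBumpPointer && type != kAllocatorTypeTLAB;
    }

    template <AllocatorType kType>
    void MaybeCheckConcurrentGC(bool gc_is_concurrent) {
      // For the BumpPointer and TLAB allocators the left operand is the constant false, so the
      // compiler drops the whole branch; for the malloc-backed allocators the run-time flag is read.
      if (AllocatorMayHaveConcurrentGC(kType) && gc_is_concurrent) {
        // CheckConcurrentGC(...) would run here.
      }
    }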
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e8ee62f..1a32a9a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -35,6 +35,7 @@
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/concurrent_copying.h"
#include "gc/collector/mark_sweep-inl.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
@@ -70,6 +71,8 @@ namespace art {
namespace gc {
+static constexpr size_t kCollectorTransitionStressIterations = 0;
+static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
static constexpr bool kGCALotMode = false;
static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
@@ -88,7 +91,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
rosalloc_space_(nullptr),
dlmalloc_space_(nullptr),
main_space_(nullptr),
- concurrent_gc_(false),
collector_type_(kCollectorTypeNone),
post_zygote_collector_type_(post_zygote_collector_type),
background_collector_type_(background_collector_type),
@@ -277,7 +279,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// Card cache for now since it makes it easier for us to update the references to the copying
// spaces.
accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
+ new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
+ GetImageSpace());
CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
AddModUnionTable(mod_union_table);
@@ -326,6 +329,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
semi_space_collector_ = new collector::SemiSpace(this, generational);
garbage_collectors_.push_back(semi_space_collector_);
+
+ concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+ garbage_collectors_.push_back(concurrent_copying_collector_);
}
if (running_on_valgrind_) {
@@ -482,6 +488,13 @@ void Heap::DecrementDisableMovingGC(Thread* self) {
void Heap::UpdateProcessState(ProcessState process_state) {
if (process_state_ != process_state) {
process_state_ = process_state;
+ for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
+ // Start at index 1 to avoid "is always false" warning.
+ // Have iteration 1 always transition the collector.
+ TransitionCollector((((i & 1) == 1) == (process_state_ == kProcessStateJankPerceptible))
+ ? post_zygote_collector_type_ : background_collector_type_);
+ usleep(kCollectorTransitionStressWait);
+ }
if (process_state_ == kProcessStateJankPerceptible) {
// Transition back to foreground right away to prevent jank.
RequestCollectorTransition(post_zygote_collector_type_, 0);
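The parity expression in the stress loop above is terse; a throwaway sketch, with invented collector names standing in for post_zygote_collector_type_ and background_collector_type_, prints which target each iteration picks.

    #include <cstddef>
    #include <cstdio>

    enum CollectorType { kForeground, kBackground };  // stand-ins for the two transition targets

    int main() {
      const bool jank_perceptible = true;  // e.g. the app just moved to the foreground
      for (std::size_t i = 1; i <= 4; ++i) {
        const CollectorType picked =
            (((i & 1) == 1) == jank_perceptible) ? kForeground : kBackground;
        std::printf("stress iteration %zu -> %s\n", i,
                    picked == kForeground ? "foreground collector" : "background collector");
      }
      // Iteration 1 always matches the new process state; later iterations alternate so both
      // transition directions get exercised.
      return 0;
    }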
@@ -649,9 +662,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (const auto& collector : garbage_collectors_) {
- CumulativeLogger& logger = collector->GetCumulativeTimings();
+ const CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
- os << Dumpable<CumulativeLogger>(logger);
+ os << ConstDumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
@@ -699,9 +712,11 @@ Heap::~Heap() {
allocation_stack_->Reset();
live_stack_->Reset();
STLDeleteValues(&mod_union_tables_);
+ STLDeleteValues(&remembered_sets_);
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
+ delete heap_trim_request_lock_;
VLOG(heap) << "Finished ~Heap()";
}
@@ -751,23 +766,32 @@ mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* a
return args->mark_callback_(obj, args->arg_);
}
-// Process reference class instances and schedule finalizations.
-void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
- IsMarkedCallback* is_marked_callback,
- MarkObjectCallback* mark_object_callback,
- ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
- // Unless we are in the zygote or required to clear soft references with white references,
- // preserve some white referents.
- if (!clear_soft && !Runtime::Current()->IsZygote()) {
+void Heap::ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
+ // Unless required to clear soft references with white references, preserve some white referents.
+ if (!clear_soft) {
+ // Don't clear for sticky GC.
SoftReferenceArgs soft_reference_args;
soft_reference_args.is_marked_callback_ = is_marked_callback;
soft_reference_args.mark_callback_ = mark_object_callback;
soft_reference_args.arg_ = arg;
+ // References with a marked referent are removed from the list.
soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
&soft_reference_args);
process_mark_stack_callback(arg);
}
+}
+
+// Process reference class instances and schedule finalizations.
+void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
timings.StartSplit("(Paused)ProcessReferences");
+ ProcessSoftReferences(timings, clear_soft, is_marked_callback, mark_object_callback,
+ process_mark_stack_callback, arg);
// Clear all remaining soft and weak references with white referents.
soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
@@ -796,7 +820,8 @@ void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
// marked, put it on the appropriate list in the heap for later processing.
void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
IsMarkedCallback is_marked_callback, void* arg) {
- DCHECK_EQ(klass, ref->GetClass());
+ // klass can be the class of the old object if the visitor already updated the class of ref.
+ DCHECK(klass->IsReferenceClass());
mirror::Object* referent = ref->GetReferent();
if (referent != nullptr) {
mirror::Object* forward_address = is_marked_callback(referent, arg);
@@ -1009,20 +1034,20 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
}
if (search_allocation_stack) {
if (sorted) {
- if (allocation_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ if (allocation_stack_->ContainsSorted(obj)) {
return true;
}
- } else if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+ } else if (allocation_stack_->Contains(obj)) {
return true;
}
}
if (search_live_stack) {
if (sorted) {
- if (live_stack_->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ if (live_stack_->ContainsSorted(obj)) {
return true;
}
- } else if (live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+ } else if (live_stack_->Contains(obj)) {
return true;
}
}
@@ -1092,8 +1117,12 @@ void Heap::VerifyHeap() {
GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}
-void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
- DCHECK_LE(freed_bytes, num_bytes_allocated_.Load());
+void Heap::RecordFree(ssize_t freed_objects, ssize_t freed_bytes) {
+ // Use signed comparison since freed bytes can be negative when a background compaction to
+ // foreground transition occurs. This is caused by moving objects from a bump pointer space to a
+ // free-list backed space, which typically increases the memory footprint due to padding and binning.
+ DCHECK_LE(freed_bytes, static_cast<ssize_t>(num_bytes_allocated_.Load()));
+ DCHECK_GE(freed_objects, 0);
num_bytes_allocated_.FetchAndSub(freed_bytes);
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
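A small sketch of the accounting consequence, with made-up byte counts: passing a negative delta to an unsigned FetchAndSub-style subtraction increases the counter, which is what a footprint-growing compaction needs.

    #include <cstddef>
    #include <cstdio>
    #include <sys/types.h>  // ssize_t

    int main() {
      std::size_t num_bytes_allocated = 10u * 1024 * 1024;  // 10 MiB before the transition
      const ssize_t freed_bytes = -256 * 1024;               // footprint grew by 256 KiB
      num_bytes_allocated -= freed_bytes;                    // subtracting a negative delta adds
      std::printf("bytes allocated after the transition: %zu\n", num_bytes_allocated);
      return 0;
    }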
@@ -1128,13 +1157,29 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
}
+ collector::GcType tried_type = next_gc_type_;
+ if (ptr == nullptr) {
+ const bool gc_ran =
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ *klass = sirt_klass.get();
+ return nullptr;
+ }
+ if (gc_ran) {
+ ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ }
+ }
+
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
if (ptr != nullptr) {
break;
}
+ if (gc_type == tried_type) {
+ continue;
+ }
// Attempt to run the collector, if we succeed, re-try the allocation.
- bool gc_ran =
+ const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
*klass = sirt_klass.get();
@@ -1289,15 +1334,16 @@ class ReferringObjectsFinder {
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
- collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
+ void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+ o->VisitReferences<true>(*this);
}
- // For MarkSweep::VisitObjectReferences.
- void operator()(mirror::Object* referrer, mirror::Object* object,
- const MemberOffset&, bool) const {
- if (object == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
- referring_objects_.push_back(referrer);
+ // For Object::VisitReferences.
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
+ referring_objects_.push_back(obj);
}
}
@@ -1404,7 +1450,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
break;
}
default: {
- LOG(FATAL) << "Attempted to transition to invalid collector type";
+ LOG(FATAL) << "Attempted to transition to invalid collector type "
+ << static_cast<size_t>(collector_type);
break;
}
}
@@ -1414,7 +1461,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
// Can't call into java code with all threads suspended.
EnqueueClearedReferences();
uint64_t duration = NanoTime() - start_time;
- GrowForUtilization(collector::kGcTypeFull, duration);
+ GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
int32_t after_size = GetTotalMemory();
int32_t delta_size = before_size - after_size;
@@ -1434,10 +1481,9 @@ void Heap::ChangeCollector(CollectorType collector_type) {
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
- case kCollectorTypeSS:
- // Fall-through.
+ case kCollectorTypeCC: // Fall-through.
+ case kCollectorTypeSS: // Fall-through.
case kCollectorTypeGSS: {
- concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
ChangeAllocator(kAllocatorTypeTLAB);
@@ -1447,7 +1493,6 @@ void Heap::ChangeCollector(CollectorType collector_type) {
break;
}
case kCollectorTypeMS: {
- concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
@@ -1455,7 +1500,6 @@ void Heap::ChangeCollector(CollectorType collector_type) {
break;
}
case kCollectorTypeCMS: {
- concurrent_gc_ = true;
gc_plan_.push_back(collector::kGcTypeSticky);
gc_plan_.push_back(collector::kGcTypePartial);
gc_plan_.push_back(collector::kGcTypeFull);
@@ -1466,7 +1510,7 @@ void Heap::ChangeCollector(CollectorType collector_type) {
LOG(FATAL) << "Unimplemented";
}
}
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
concurrent_start_bytes_ =
std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
} else {
@@ -1790,37 +1834,41 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
if (compacting_gc) {
DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
current_allocator_ == kAllocatorTypeTLAB);
- gc_type = semi_space_collector_->GetGcType();
- CHECK(temp_space_->IsEmpty());
- semi_space_collector_->SetFromSpace(bump_pointer_space_);
- semi_space_collector_->SetToSpace(temp_space_);
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
+ gc_type = semi_space_collector_->GetGcType();
+ semi_space_collector_->SetFromSpace(bump_pointer_space_);
+ semi_space_collector_->SetToSpace(temp_space_);
+ collector = semi_space_collector_;
+ } else if (collector_type_ == kCollectorTypeCC) {
+ gc_type = concurrent_copying_collector_->GetGcType();
+ collector = concurrent_copying_collector_;
+ } else {
+ LOG(FATAL) << "Unreachable - invalid collector type " << static_cast<size_t>(collector_type_);
+ }
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
- collector = semi_space_collector_;
+ CHECK(temp_space_->IsEmpty());
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
current_allocator_ == kAllocatorTypeDlMalloc) {
- for (const auto& cur_collector : garbage_collectors_) {
- if (cur_collector->IsConcurrent() == concurrent_gc_ &&
- cur_collector->GetGcType() == gc_type) {
- collector = cur_collector;
- break;
- }
- }
+ collector = FindCollectorByGcType(gc_type);
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
CHECK(collector != nullptr)
- << "Could not find garbage collector with concurrent=" << concurrent_gc_
- << " and type=" << gc_type;
+ << "Could not find garbage collector with collector_type="
+ << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
- collector->Run(gc_cause, clear_soft_references);
+ if (!clear_soft_references) {
+ clear_soft_references = gc_type != collector::kGcTypeSticky; // TODO: GSS?
+ }
+ collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
- GrowForUtilization(gc_type, collector->GetDurationNs());
+ GrowForUtilization(collector);
if (CareAboutPauseTimes()) {
const size_t duration = collector->GetDurationNs();
std::vector<uint64_t> pauses = collector->GetPauseTimes();
@@ -1849,9 +1897,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
- if (VLOG_IS_ON(heap)) {
- LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
- }
+ VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
}
}
FinishGC(self, gc_type);
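The clear_soft_references handling above amounts to a small policy; restated as a standalone predicate (a hypothetical helper, not ART code):

    // Returns the value ultimately passed to collector->Run() for clear_soft_references.
    bool ShouldClearSoftReferences(bool requested, bool is_sticky_gc, bool is_zygote) {
      bool clear = requested;
      if (!clear) {
        clear = !is_sticky_gc;    // every non-sticky collection clears soft references
      }
      return clear || is_zygote;  // and the zygote always clears
    }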
@@ -1898,10 +1944,18 @@ class VerifyReferenceVisitor {
return failed_;
}
- // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
- // analysis on visitors.
- void operator()(mirror::Object* obj, mirror::Object* ref,
- const MemberOffset& offset, bool /* is_static */) const
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+ }
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ this->operator()(obj, obj->GetFieldObject<mirror::Object>(offset, false), offset);
+ }
+
+ // TODO: Fix the no thread safety analysis.
+ void operator()(mirror::Object* obj, mirror::Object* ref, MemberOffset offset) const
NO_THREAD_SAFETY_ANALYSIS {
if (ref == nullptr || IsLive(ref)) {
// Verify that the reference is live.
@@ -2002,7 +2056,7 @@ class VerifyReferenceVisitor {
static void VerifyRoots(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
- (*visitor)(nullptr, *root, MemberOffset(0), true);
+ (*visitor)(nullptr, *root, MemberOffset(0));
}
private:
@@ -2021,11 +2075,7 @@ class VerifyObjectVisitor {
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_);
// The class doesn't count as a reference but we should verify it anyways.
- collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
- if (obj->IsReferenceInstance()) {
- mirror::Reference* ref = obj->AsReference();
- visitor(obj, ref->GetReferent(), mirror::Reference::ReferentOffset(), false);
- }
+ obj->VisitReferences<true>(visitor, visitor);
failed_ = failed_ || visitor.Failed();
}
@@ -2090,11 +2140,12 @@ class VerifyReferenceCardVisitor {
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
- void operator()(mirror::Object* obj, mirror::Object* ref, const MemberOffset& offset,
- bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
// Filter out class references since changing an object's class does not mark the card as dirty.
// Also handles large objects, since the only reference they hold is a class reference.
- if (ref != NULL && !ref->IsClass()) {
+ if (ref != nullptr && !ref->IsClass()) {
accounting::CardTable* card_table = heap_->GetCardTable();
// If the object is not dirty and it is referencing something in the live stack other than
// class, then it must be on a dirty card.
@@ -2106,8 +2157,8 @@ class VerifyReferenceCardVisitor {
// Card should be either kCardDirty if it got re-dirtied after we aged it, or
// kCardDirty - 1 if it didn't get touched since we aged it.
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
- if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
- if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
+ if (live_stack->ContainsSorted(ref)) {
+ if (live_stack->ContainsSorted(obj)) {
LOG(ERROR) << "Object " << obj << " found in live stack";
}
if (heap_->GetLiveBitmap()->Test(obj)) {
@@ -2161,7 +2212,7 @@ class VerifyLiveStackReferences {
void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
- collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
+ obj->VisitReferences<true>(visitor);
}
bool Failed() const {
@@ -2217,6 +2268,14 @@ void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
}
}
+void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
+ if (kIsDebugBuild) {
+ if (bump_pointer_space_ != nullptr) {
+ bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
+ }
+ }
+}
+
accounting::ModUnionTable* Heap::FindModUnionTableFromSpace(space::Space* space) {
auto it = mod_union_tables_.find(space);
if (it == mod_union_tables_.end()) {
@@ -2262,8 +2321,7 @@ void Heap::ProcessCards(TimingLogger& timings, bool use_rem_sets) {
}
}
-static mirror::Object* IdentityMarkObjectCallback(mirror::Object* obj, void*) {
- return obj;
+static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
}
void Heap::PreGcVerification(collector::GarbageCollector* gc) {
@@ -2301,7 +2359,7 @@ void Heap::PreGcVerification(collector::GarbageCollector* gc) {
ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
- mod_union_table->UpdateAndMarkReferences(IdentityMarkObjectCallback, nullptr);
+ mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
mod_union_table->Verify();
}
thread_list->ResumeAll();
@@ -2442,13 +2500,24 @@ void Heap::UpdateMaxNativeFootprint() {
native_footprint_limit_ = 2 * target_size - native_size;
}
-void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
+collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
+ for (const auto& collector : garbage_collectors_) {
+ if (collector->GetCollectorType() == collector_type_ &&
+ collector->GetGcType() == gc_type) {
+ return collector;
+ }
+ }
+ return nullptr;
+}
+
+void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
const size_t bytes_allocated = GetBytesAllocated();
last_gc_size_ = bytes_allocated;
last_gc_time_ns_ = NanoTime();
size_t target_size;
+ collector::GcType gc_type = collector_ran->GetGcType();
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
target_size = bytes_allocated / GetTargetHeapUtilization();
@@ -2460,12 +2529,22 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- // Based on how close the current heap size is to the target size, decide
- // whether or not to do a partial or sticky GC next.
- if (bytes_allocated + min_free_ <= max_allowed_footprint_) {
+ collector::GcType non_sticky_gc_type =
+ have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ // Find what the next non sticky collector will be.
+ collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
+ // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
+ // do another sticky collection next.
+ // We also check that the bytes allocated aren't over the footprint limit in order to prevent a
+ // pathological case where dead objects that aren't reclaimed by the sticky GC could accumulate
+ // if the sticky GC throughput always remained >= the full/partial throughput.
+ if (collector_ran->GetEstimatedLastIterationThroughput() >=
+ non_sticky_collector->GetEstimatedMeanThroughput() &&
+ non_sticky_collector->GetIterations() > 0 &&
+ bytes_allocated <= max_allowed_footprint_) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
- next_gc_type_ = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type_ = non_sticky_gc_type;
}
// If we have freed enough memory, shrink the heap back down.
if (bytes_allocated + max_free_ < max_allowed_footprint_) {
@@ -2476,10 +2555,10 @@ void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
}
if (!ignore_max_footprint_) {
SetIdealFootprint(target_size);
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
+ const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
@@ -2639,6 +2718,12 @@ void Heap::RevokeThreadLocalBuffers(Thread* thread) {
}
}
+void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
+ if (rosalloc_space_ != nullptr) {
+ rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ }
+}
+
void Heap::RevokeAllThreadLocalBuffers() {
if (rosalloc_space_ != nullptr) {
rosalloc_space_->RevokeAllThreadLocalBuffers();
@@ -2696,7 +2781,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
// finalizers released native managed allocations.
UpdateMaxNativeFootprint();
} else if (!IsGCRequestPending()) {
- if (concurrent_gc_) {
+ if (IsGcConcurrent()) {
RequestConcurrentGC(self);
} else {
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
@@ -2765,5 +2850,19 @@ void Heap::RemoveRememberedSet(space::Space* space) {
CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}
+void Heap::ClearMarkedObjects() {
+ // Clear all of the spaces' mark bitmaps.
+ for (const auto& space : GetContinuousSpaces()) {
+ accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+ if (space->GetLiveBitmap() != mark_bitmap) {
+ mark_bitmap->Clear();
+ }
+ }
+ // Clear the marked objects in the discontinuous space object sets.
+ for (const auto& space : GetDiscontinuousSpaces()) {
+ space->GetMarkObjects()->Clear();
+ }
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index de20a4e..5879757 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -60,6 +60,7 @@ namespace accounting {
} // namespace accounting
namespace collector {
+ class ConcurrentCopying;
class GarbageCollector;
class MarkSweep;
class SemiSpace;
@@ -254,6 +255,9 @@ class Heap {
void IncrementDisableMovingGC(Thread* self);
void DecrementDisableMovingGC(Thread* self);
+ // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
+ void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
// Initiates an explicit garbage collection.
void CollectGarbage(bool clear_soft_references);
@@ -314,6 +318,12 @@ class Heap {
}
static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
+ void ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
+ IsMarkedCallback* is_marked_callback,
+ MarkObjectCallback* mark_object_callback,
+ ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void ProcessReferences(TimingLogger& timings, bool clear_soft,
IsMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback,
@@ -347,7 +357,7 @@ class Heap {
// Freed bytes can be negative in cases where we copy objects from a compacted space to a
// free-list backed space.
- void RecordFree(size_t freed_objects, size_t freed_bytes);
+ void RecordFree(ssize_t freed_objects, ssize_t freed_bytes);
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
@@ -431,7 +441,9 @@ class Heap {
void Trim() LOCKS_EXCLUDED(heap_trim_request_lock_);
void RevokeThreadLocalBuffers(Thread* thread);
+ void RevokeRosAllocThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
void PreGcRosAllocVerification(TimingLogger* timings)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -547,6 +559,9 @@ class Heap {
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
+ bool RunningOnValgrind() const {
+ return running_on_valgrind_;
+ }
bool HasImageSpace() const;
private:
@@ -564,7 +579,8 @@ class Heap {
return AllocatorHasAllocationStack(allocator_type);
}
static bool IsCompactingGC(CollectorType collector_type) {
- return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS;
+ return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS ||
+ collector_type == kCollectorTypeCC;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -647,10 +663,13 @@ class Heap {
// bytes allocated and the target utilization ratio.
void UpdateMaxNativeFootprint();
+ // Find a collector based on GC type.
+ collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
- void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
+ void GrowForUtilization(collector::GarbageCollector* collector_ran);
size_t GetPercentFree();
@@ -674,6 +693,12 @@ class Heap {
// Push an object onto the allocation stack.
void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+ // What kind of concurrency behavior is the runtime after? Currently true for the concurrent
+ // mark sweep and concurrent copying GCs, false for other GC types.
+ bool IsGcConcurrent() const ALWAYS_INLINE {
+ return collector_type_ == kCollectorTypeCMS || collector_type_ == kCollectorTypeCC;
+ }
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -716,10 +741,6 @@ class Heap {
// The mem-map which we will use for the non-moving space after the zygote is done forking:
UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;
- // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
- // sweep GC, false for other GC types.
- bool concurrent_gc_;
-
// The current collector type.
CollectorType collector_type_;
// Which collector we will switch to after zygote fork.
@@ -919,6 +940,7 @@ class Heap {
std::vector<collector::GarbageCollector*> garbage_collectors_;
collector::SemiSpace* semi_space_collector_;
+ collector::ConcurrentCopying* concurrent_copying_collector_;
const bool running_on_valgrind_;
const bool use_tlab_;
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fcd3b70..6148894 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -104,6 +104,26 @@ void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
}
}
+void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ MutexLock mu(Thread::Current(), block_lock_);
+ DCHECK(!thread->HasTlab());
+ }
+}
+
+void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
+ if (kIsDebugBuild) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(self, *Locks::thread_list_lock_);
+ // TODO: Avoid copying the thread list?
+ std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ }
+}
+
void BumpPointerSpace::UpdateMainBlock() {
DCHECK_EQ(num_blocks_, 0U);
main_block_size_ = Size();
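The new Assert*ThreadLocalBuffersAreRevoked helpers follow the usual if (kIsDebugBuild) idiom; a minimal sketch of the pattern, with a stand-in constant and assertion:

    #include <cassert>

    #ifdef NDEBUG
    static constexpr bool kIsDebugBuild = false;
    #else
    static constexpr bool kIsDebugBuild = true;
    #endif

    void AssertThreadLocalBufferRevoked(bool thread_has_tlab) {
      if (kIsDebugBuild) {
        // Unlike an #ifdef, the body is always compiled and type-checked, but release builds
        // see a constant-false condition and drop it entirely.
        assert(!thread_has_tlab && "thread-local buffer was not revoked");
      }
    }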
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 031fccd..3ab5df4 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -103,6 +103,9 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
Locks::thread_list_lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
+ void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 0597422..30c2edb 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -60,7 +60,7 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
// Everything is set so record in immutable structure and leave
byte* begin = mem_map->Begin();
- if (RUNNING_ON_VALGRIND > 0) {
+ if (Runtime::Current()->GetHeap()->RunningOnValgrind()) {
return new ValgrindMallocSpace<DlMallocSpace, void*>(
name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size);
} else {
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 1ca132e..2fc67ec 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -29,6 +29,50 @@ namespace art {
namespace gc {
namespace space {
+class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
+ public:
+ explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
+ }
+
+ virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
+ size_t* usable_size) OVERRIDE {
+ mirror::Object* obj =
+ LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
+ usable_size);
+ mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
+ kValgrindRedZoneBytes);
+ if (usable_size != nullptr) {
+ *usable_size = num_bytes; // Since we have redzones, shrink the usable size.
+ }
+ return object_without_rdz;
+ }
+
+ virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
+ }
+
+ virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
+ return LargeObjectMapSpace::Free(self, object_with_rdz);
+ }
+
+ bool Contains(const mirror::Object* obj) const OVERRIDE {
+ mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
+ return LargeObjectMapSpace::Contains(object_with_rdz);
+ }
+
+ private:
+ static constexpr size_t kValgrindRedZoneBytes = kPageSize;
+};
+
void LargeObjectSpace::SwapBitmaps() {
live_objects_.swap(mark_objects_);
// Swap names to get more descriptive diagnostics.
@@ -53,7 +97,11 @@ LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
- return new LargeObjectMapSpace(name);
+ if (RUNNING_ON_VALGRIND > 0) {
+ return new ValgrindLargeObjectMapSpace(name);
+ } else {
+ return new LargeObjectMapSpace(name);
+ }
}
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
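The Valgrind wrapper above offsets every returned pointer by a red zone; here is a tiny round-trip sketch with a plain heap buffer standing in for the real LargeObjectMapSpace allocation.

    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr std::size_t kRedZoneBytes = 4096;  // one page on each side, as in the patch
      const std::size_t num_bytes = 1u << 20;      // requested allocation size
      unsigned char* raw = new unsigned char[num_bytes + 2 * kRedZoneBytes];
      unsigned char* usable = raw + kRedZoneBytes;         // what Alloc() hands to the caller
      unsigned char* recovered = usable - kRedZoneBytes;   // what Free()/AllocationSize() recover
      std::printf("round trip ok: %s\n", raw == recovered ? "yes" : "no");
      delete[] raw;
      return 0;
    }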
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index b1b0c3c..eb01325 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -91,7 +91,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
};
// A discontinuous large object space implemented by individual mmap/munmap calls.
-class LargeObjectMapSpace FINAL : public LargeObjectSpace {
+class LargeObjectMapSpace : public LargeObjectSpace {
public:
// Creates a large object space. Allocations into the large object space use memory maps instead
// of malloc.
@@ -106,7 +106,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
- private:
+ protected:
explicit LargeObjectMapSpace(const std::string& name);
virtual ~LargeObjectMapSpace() {}
@@ -115,7 +115,7 @@ class LargeObjectMapSpace FINAL : public LargeObjectSpace {
std::vector<mirror::Object*,
accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
- accounting::GcAllocator<std::pair<const mirror::Object*, MemMap*> > > MemMaps;
+ accounting::GcAllocator<std::pair<mirror::Object*, MemMap*> > > MemMaps;
MemMaps mem_maps_ GUARDED_BY(lock_);
};
@@ -150,7 +150,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
void Dump(std::ostream& os) const;
- private:
+ protected:
static const size_t kAlignment = kPageSize;
class AllocationHeader {
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 80c7ca7..3c65205 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -65,7 +65,7 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
byte* begin = mem_map->Begin();
// TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
// AllocationSize caused by redzones. b/12944686
- if (false && RUNNING_ON_VALGRIND > 0) {
+ if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
return new ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>(
name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
} else {
@@ -308,6 +308,12 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
rosalloc_->RevokeAllThreadLocalRuns();
}
+void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
+ if (kIsDebugBuild) {
+ rosalloc_->AssertAllThreadLocalRunsAreRevoked();
+ }
+}
+
void RosAllocSpace::Clear() {
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9b9adf8..949ec08 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -89,6 +89,7 @@ class RosAllocSpace : public MallocSpace {
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertAllThreadLocalBuffersAreRevoked();
// Returns the class of a recently freed object.
mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 89a63ac..525e2b3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -57,6 +57,19 @@ static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
return instrumentation->InstallStubsForClass(klass);
}
+Instrumentation::Instrumentation()
+ : instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
+ interpreter_stubs_installed_(false),
+ interpret_only_(false), forced_interpret_only_(false),
+ have_method_entry_listeners_(false), have_method_exit_listeners_(false),
+ have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
+ have_exception_caught_listeners_(false),
+ deoptimized_methods_lock_("deoptimized methods lock"),
+ deoptimization_enabled_(false),
+ interpreter_handler_table_(kMainHandlerTable),
+ quick_alloc_entry_points_instrumentation_counter_(0) {
+}
+
bool Instrumentation::InstallStubsForClass(mirror::Class* klass) {
for (size_t i = 0, e = klass->NumDirectMethods(); i < e; i++) {
InstallStubsForMethod(klass->GetDirectMethod(i));
@@ -445,7 +458,12 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
entry_exit_stubs_installed_ = false;
runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
// Restore stack only if there is no method currently deoptimized.
- if (deoptimized_methods_.empty()) {
+ bool empty;
+ {
+ ReaderMutexLock mu(self, deoptimized_methods_lock_);
+ empty = deoptimized_methods_.empty(); // Avoid lock violation.
+ }
+ if (empty) {
instrumentation_stubs_installed_ = false;
MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
@@ -542,7 +560,12 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
- std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair = deoptimized_methods_.insert(method);
+ Thread* self = Thread::Current();
+ std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair;
+ {
+ WriterMutexLock mu(self, deoptimized_methods_lock_);
+ pair = deoptimized_methods_.insert(method);
+ }
bool already_deoptimized = !pair.second;
CHECK(!already_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
@@ -553,7 +576,7 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
// Install instrumentation exit stub and instrumentation frames. We may already have installed
// these previously so it will only cover the newly created frames.
instrumentation_stubs_installed_ = true;
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(InstrumentationInstallStack, this);
}
}
@@ -563,9 +586,16 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
- auto it = deoptimized_methods_.find(method);
- CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method) << " is not deoptimized";
- deoptimized_methods_.erase(it);
+ Thread* self = Thread::Current();
+ bool empty;
+ {
+ WriterMutexLock mu(self, deoptimized_methods_lock_);
+ auto it = deoptimized_methods_.find(method);
+ CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method)
+ << " is not deoptimized";
+ deoptimized_methods_.erase(it);
+ empty = deoptimized_methods_.empty();
+ }
// Restore code and possibly stack only if we did not deoptimize everything.
if (!interpreter_stubs_installed_) {
@@ -583,8 +613,8 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
}
// If there is no deoptimized method left, we can restore the stack of each thread.
- if (deoptimized_methods_.empty()) {
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ if (empty) {
+ MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
instrumentation_stubs_installed_ = false;
}
@@ -592,11 +622,13 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
}
bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) const {
+ ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
DCHECK(method != nullptr);
- return deoptimized_methods_.count(method);
+ return deoptimized_methods_.find(method) != deoptimized_methods_.end();
}
void Instrumentation::EnableDeoptimization() {
+ ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
CHECK(deoptimized_methods_.empty());
CHECK_EQ(deoptimization_enabled_, false);
deoptimization_enabled_ = true;
@@ -609,11 +641,17 @@ void Instrumentation::DisableDeoptimization() {
UndeoptimizeEverything();
}
// Undeoptimize selected methods.
- while (!deoptimized_methods_.empty()) {
- auto it_begin = deoptimized_methods_.begin();
- Undeoptimize(*it_begin);
+ while (true) {
+ mirror::ArtMethod* method;
+ {
+ ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ if (deoptimized_methods_.empty()) {
+ break;
+ }
+ method = *deoptimized_methods_.begin();
+ }
+ Undeoptimize(method);
}
- CHECK(deoptimized_methods_.empty());
deoptimization_enabled_ = false;
}
@@ -827,6 +865,20 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
}
}
+void Instrumentation::VisitRoots(RootCallback* callback, void* arg) {
+ WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ if (deoptimized_methods_.empty()) {
+ return;
+ }
+ std::set<mirror::ArtMethod*> new_deoptimized_methods;
+ for (mirror::ArtMethod* method : deoptimized_methods_) {
+ DCHECK(method != nullptr);
+ callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootVMInternal);
+ new_deoptimized_methods.insert(method);
+ }
+ deoptimized_methods_ = new_deoptimized_methods;
+}
+
std::string InstrumentationStackFrame::Dump() const {
std::ostringstream os;
os << "Frame " << frame_id_ << " " << PrettyMethod(method_) << ":"
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e9356e0..2a9c35f 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -20,6 +20,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "object_callbacks.h"
#include <stdint.h>
#include <set>
@@ -98,16 +99,7 @@ class Instrumentation {
kExceptionCaught = 16
};
- Instrumentation() :
- instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false),
- interpreter_stubs_installed_(false),
- interpret_only_(false), forced_interpret_only_(false),
- have_method_entry_listeners_(false), have_method_exit_listeners_(false),
- have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
- have_exception_caught_listeners_(false),
- deoptimization_enabled_(false),
- interpreter_handler_table_(kMainHandlerTable),
- quick_alloc_entry_points_instrumentation_counter_(0) {}
+ Instrumentation();
// Add a listener to be notified of the masked-together set of instrumentation events. This
// suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
@@ -123,8 +115,15 @@ class Instrumentation {
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
// Deoptimization.
- void EnableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DisableDeoptimization() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnableDeoptimization()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ void DisableDeoptimization()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ bool AreAllMethodsDeoptimized() const {
+ return interpreter_stubs_installed_;
+ }
bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Executes everything with interpreter.
@@ -141,17 +140,17 @@ class Instrumentation {
// method (except a class initializer) set to the resolution trampoline will be deoptimized only
// once its declaring class is initialized.
void Deoptimize(mirror::ArtMethod* method)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
void Undeoptimize(mirror::ArtMethod* method)
- LOCKS_EXCLUDED(Locks::thread_list_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsDeoptimized(mirror::ArtMethod* method) const;
+ bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs.
void EnableMethodTracing()
@@ -286,11 +285,15 @@ class Instrumentation {
void InstallStubsForMethod(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(deoptimized_methods_lock_);
+
private:
// Does the job of installing or removing instrumentation code within methods.
void ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
+ LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
+ deoptimized_methods_lock_);
void UpdateInterpreterHandlerTable() {
interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable;
@@ -354,8 +357,8 @@ class Instrumentation {
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
- // TODO we need to visit these methods as roots.
- std::set<mirror::ArtMethod*> deoptimized_methods_;
+ mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::set<mirror::ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
bool deoptimization_enabled_;
// Current interpreter handler table. This is updated each time the thread state flags are
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 524798d..dfc82dd 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -242,15 +242,15 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
return InsertWeak(s, hash_code);
}
-mirror::String* InternTable::InternStrong(int32_t utf16_length,
- const char* utf8_data) {
+mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
+ DCHECK(utf8_data != nullptr);
return InternStrong(mirror::String::AllocFromModifiedUtf8(
Thread::Current(), utf16_length, utf8_data));
}
mirror::String* InternTable::InternStrong(const char* utf8_data) {
- return InternStrong(
- mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
+ DCHECK(utf8_data != nullptr);
+ return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
mirror::String* InternTable::InternStrong(mirror::String* s) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index e8cea9d..297f1a8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -30,9 +30,10 @@ static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFra
size_t dest_reg, size_t src_reg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// If both register locations contain the same value, the register probably holds a reference.
- int32_t src_value = shadow_frame.GetVReg(src_reg);
+ // Unsigned type required so that sign extension does not make this comparison wrong on 64-bit systems.
+ uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
- if (src_value == reinterpret_cast<intptr_t>(o)) {
+ if (src_value == reinterpret_cast<uintptr_t>(o)) {
new_shadow_frame->SetVRegReference(dest_reg, o);
} else {
new_shadow_frame->SetVReg(dest_reg, src_value);
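A standalone demonstration of the sign-extension hazard the uint32_t change avoids, assuming (as this path does) that heap references fit in the low 4 GiB; the address below is invented.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A reference in the low 4 GiB whose low word has the top bit set.
      uintptr_t fake_ref = 0x80001000u;

      int32_t  signed_vreg   = static_cast<int32_t>(fake_ref);   // old register representation
      uint32_t unsigned_vreg = static_cast<uint32_t>(fake_ref);  // new register representation

      // On a 64-bit host the signed value is sign-extended to 0xFFFFFFFF80001000 and the
      // comparison fails; the unsigned value zero-extends and matches.
      const bool old_matches = static_cast<intptr_t>(signed_vreg) == static_cast<intptr_t>(fake_ref);
      const bool new_matches = static_cast<uintptr_t>(unsigned_vreg) == fake_ref;
      std::printf("signed compare: %d, unsigned compare: %d\n", old_matches, new_matches);
      return 0;
    }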
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 4c17c96..66ebb96 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -335,12 +335,10 @@ struct JdwpState {
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
- size_t full_deoptimization_requests_ GUARDED_BY(event_list_lock_); // Number of events requiring
- // full deoptimization.
// Used to synchronize suspension of the event thread (to avoid receiving "resume"
// events before the thread has finished suspending itself).
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 427350e..9b3ea2e 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -163,11 +163,12 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
* If one or more "break"-type mods are used, register them with
* the interpreter.
*/
+ DeoptimizationRequest req;
for (int i = 0; i < pEvent->modCount; i++) {
const JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
/* should only be for Breakpoint, Step, and Exception */
- Dbg::WatchLocation(&pMod->locationOnly.loc);
+ Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
} else if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
@@ -181,6 +182,11 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
dumpEvent(pEvent); /* TODO - need for field watches */
}
}
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullDeoptimization;
+ }
{
/*
@@ -193,19 +199,11 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
}
event_list_ = pEvent;
++event_list_size_;
-
- /**
- * Do we need to enable full deoptimization ?
- */
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
- if (full_deoptimization_requests_ == 0) {
- // This is the first event that needs full deoptimization: enable it.
- Dbg::EnableFullDeoptimization();
- }
- ++full_deoptimization_requests_;
- }
}
+ // TODO: we can do a better job here since we should process only one request: the one we just
+ // created.
+ Dbg::RequestDeoptimization(req);
Dbg::ManageDeoptimization();
return ERR_NONE;
@@ -238,31 +236,28 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
/*
* Unhook us from the interpreter, if necessary.
*/
+ DeoptimizationRequest req;
for (int i = 0; i < pEvent->modCount; i++) {
JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
/* should only be for Breakpoint, Step, and Exception */
- Dbg::UnwatchLocation(&pMod->locationOnly.loc);
+ Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
}
if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
Dbg::UnconfigureStep(pMod->step.threadId);
}
}
+ if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
+ CHECK(req.method == nullptr);
+ req.kind = DeoptimizationRequest::kFullUndeoptimization;
+ }
--event_list_size_;
CHECK(event_list_size_ != 0 || event_list_ == NULL);
- /**
- * Can we disable full deoptimization ?
- */
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
- --full_deoptimization_requests_;
- if (full_deoptimization_requests_ == 0) {
- // We no longer need full deoptimization.
- Dbg::DisableFullDeoptimization();
- }
- }
+ Dbg::RequestDeoptimization(req);
}
/*
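The removed per-event counter is replaced by building one request per (un)registration and handing it to the debugger. A simplified sketch of that shape; the struct below is a stand-in for ART's DeoptimizationRequest, not its real definition.

    struct DeoptimizationRequest {
      enum Kind { kNothing, kFullDeoptimization, kFullUndeoptimization };
      Kind kind = kNothing;
      void* method = nullptr;  // selective target; stays null for the "full" kinds
    };

    DeoptimizationRequest BuildUnregisterRequest(bool needs_full_deoptimization) {
      DeoptimizationRequest req;  // starts out as kNothing
      // ... per-modifier handling could fill in a selective request here ...
      if (needs_full_deoptimization) {
        req.kind = DeoptimizationRequest::kFullUndeoptimization;
      }
      return req;  // the caller would pass this to Dbg::RequestDeoptimization(req)
    }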
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 77c963f..5fc0228 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -215,7 +215,6 @@ JdwpState::JdwpState(const JdwpOptions* options)
event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(NULL),
event_list_size_(0),
- full_deoptimization_requests_(0),
event_thread_lock_("JDWP event thread lock"),
event_thread_cond_("JDWP event thread condition variable", event_thread_lock_),
event_thread_id_(0),
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 43db7ec..13aa77f 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -550,24 +550,16 @@ class JNI {
return soa.AddLocalReference<jclass>(c);
}
- static jmethodID FromReflectedMethod(JNIEnv* env, jobject java_method) {
- CHECK_NON_NULL_ARGUMENT(FromReflectedMethod, java_method);
+ static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
+ CHECK_NON_NULL_ARGUMENT(FromReflectedMethod, jlr_method);
ScopedObjectAccess soa(env);
- jobject art_method = env->GetObjectField(
- java_method, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
- mirror::ArtMethod* method = soa.Decode<mirror::ArtMethod*>(art_method);
- DCHECK(method != nullptr);
- return soa.EncodeMethod(method);
+ return soa.EncodeMethod(mirror::ArtMethod::FromReflectedMethod(soa, jlr_method));
}
- static jfieldID FromReflectedField(JNIEnv* env, jobject java_field) {
- CHECK_NON_NULL_ARGUMENT(FromReflectedField, java_field);
+ static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
+ CHECK_NON_NULL_ARGUMENT(FromReflectedField, jlr_field);
ScopedObjectAccess soa(env);
- jobject art_field = env->GetObjectField(java_field,
- WellKnownClasses::java_lang_reflect_Field_artField);
- mirror::ArtField* field = soa.Decode<mirror::ArtField*>(art_field);
- DCHECK(field != nullptr);
- return soa.EncodeField(field);
+ return soa.EncodeField(mirror::ArtField::FromReflectedField(soa, jlr_field));
}
static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 5647d93..1594338 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -47,7 +47,10 @@ static std::ostream& operator<<(
}
#if defined(__LP64__) && !defined(__x86_64__)
-uintptr_t MemMap::next_mem_pos_ = kPageSize * 2; // first page to check for low-mem extent
+// Where to start with low memory allocation.
+static constexpr uintptr_t LOW_MEM_START = kPageSize * 2;
+
+uintptr_t MemMap::next_mem_pos_ = LOW_MEM_START; // first page to check for low-mem extent
#endif
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
@@ -122,6 +125,9 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif
+ // We need to store and potentially set an error number for pretty-printing errors.
+ int saved_errno = 0;
+
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
@@ -129,11 +135,25 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
#if defined(__LP64__) && !defined(__x86_64__)
// MAP_32BIT only available on x86_64.
void* actual = MAP_FAILED;
- std::string strerr;
if (low_4gb && expected == nullptr) {
flags |= MAP_FIXED;
+ bool first_run = true;
+
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+ if (4U * GB - ptr < page_aligned_byte_count) {
+ // Not enough room left below 4GB.
+ if (first_run) {
+ // Try again from the bottom.
+ ptr = LOW_MEM_START - kPageSize;
+ first_run = false;
+ continue;
+ } else {
+ // Second try failed.
+ break;
+ }
+ }
+
uintptr_t tail_ptr;
// Check pages are free.
@@ -162,11 +182,12 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
}
if (actual == MAP_FAILED) {
- strerr = "Could not find contiguous low-memory space.";
+ LOG(ERROR) << "Could not find contiguous low-memory space.";
+ saved_errno = ENOMEM;
}
} else {
actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
- strerr = strerror(errno);
+ saved_errno = errno;
}
#else
@@ -177,15 +198,16 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
#endif
void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
- std::string strerr(strerror(errno));
+ saved_errno = errno;
#endif
if (actual == MAP_FAILED) {
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
+
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
expected, page_aligned_byte_count, prot, flags, fd.get(),
- strerr.c_str(), maps.c_str());
+ strerror(saved_errno), maps.c_str());
return nullptr;
}
std::ostringstream check_map_request_error_msg;
@@ -229,15 +251,17 @@ MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, in
flags,
fd,
page_aligned_offset));
- std::string strerr(strerror(errno));
if (actual == MAP_FAILED) {
+ auto saved_errno = errno;
+
std::string maps;
ReadFileToString("/proc/self/maps", &maps);
+
*error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
") of file '%s' failed: %s\n%s",
page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
- static_cast<int64_t>(page_aligned_offset), filename, strerr.c_str(),
- maps.c_str());
+ static_cast<int64_t>(page_aligned_offset), filename,
+ strerror(saved_errno), maps.c_str());
return nullptr;
}
std::ostringstream check_map_request_error_msg;
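
The low-4gb path above scans the address space page by page starting at next_mem_pos_, and when it runs out of room below 4GB it restarts once from LOW_MEM_START before giving up. A stand-alone sketch of that two-pass scan; IsRangeFree() is a hypothetical stand-in for the real page-availability probe:

// Sketch only: the two-pass low-memory scan, with a hypothetical IsRangeFree()
// probe instead of the real per-page availability check.
#include <cstddef>
#include <cstdint>

static constexpr uintptr_t kPageSize = 4096;
static constexpr uintptr_t kFourGB = UINT64_C(4) * 1024 * 1024 * 1024;
static constexpr uintptr_t kLowMemStart = kPageSize * 2;

// Hypothetical probe; always "busy" here so the sketch is self-contained.
static bool IsRangeFree(uintptr_t /*begin*/, size_t /*byte_count*/) { return false; }

// Returns a candidate address below 4GB, or 0 if none was found.
static uintptr_t FindLowAddress(uintptr_t next_mem_pos, size_t page_aligned_byte_count) {
  bool first_run = true;
  for (uintptr_t ptr = next_mem_pos; ptr < kFourGB; ptr += kPageSize) {
    if (kFourGB - ptr < page_aligned_byte_count) {
      if (first_run) {
        // Wrap around once and rescan from the bottom of the low range;
        // the loop increment brings ptr back to kLowMemStart.
        ptr = kLowMemStart - kPageSize;
        first_run = false;
        continue;
      }
      break;  // Second pass also ran out of room.
    }
    if (IsRangeFree(ptr, page_aligned_byte_count)) {
      return ptr;
    }
  }
  return 0;
}

int main() {
  return FindLowAddress(kLowMemStart, 16 * kPageSize) == 0 ? 0 : 1;
}
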
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 1d37775..3d2fd7b 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,10 +27,6 @@
namespace art {
namespace mirror {
-static inline size_t HeaderSize(size_t component_size) {
- return sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline size_t Array::SizeOf() {
// This is safe from overflow because the array was already allocated, so we know it's sane.
@@ -38,7 +34,7 @@ inline size_t Array::SizeOf() {
// Don't need to check this since we already check this in GetClass.
int32_t component_count =
GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
- size_t header_size = HeaderSize(component_size);
+ size_t header_size = DataOffset(component_size).SizeValue();
size_t data_size = component_count * component_size;
return header_size + data_size;
}
@@ -50,7 +46,7 @@ static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t
DCHECK_GE(component_count, 0);
DCHECK(array_class->IsArrayClass());
- size_t header_size = HeaderSize(component_size);
+ size_t header_size = Array::DataOffset(component_size).SizeValue();
size_t data_size = component_count * component_size;
size_t size = header_size + data_size;
@@ -134,7 +130,7 @@ inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_c
heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
allocator_type, visitor));
} else {
- SetLengthToUsableSizeVisitor visitor(component_count, HeaderSize(component_size),
+ SetLengthToUsableSizeVisitor visitor(component_count, DataOffset(component_size).SizeValue(),
component_size);
result = down_cast<Array*>(
heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
@@ -159,8 +155,16 @@ inline void PrimitiveArray<T>::VisitRoots(RootCallback* callback, void* arg) {
}
}
-// Similar to memmove except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
+template<typename T>
+inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
+ DCHECK(array_class_ != NULL);
+ Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ return down_cast<PrimitiveArray<T>*>(raw_array);
+}
+
+// Backward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
d += count;
@@ -172,12 +176,15 @@ static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
}
}
+// Forward copy where elements are aligned appropriately for T. Count is in T-sized units.
+// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
-inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
- DCHECK(array_class_ != NULL);
- Array* raw_array = Array::Alloc<true>(self, array_class_, length, sizeof(T),
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
- return down_cast<PrimitiveArray<T>*>(raw_array);
+static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
+ for (int32_t i = 0; i < count; ++i) {
+ *d = *s;
+ d++;
+ s++;
+ }
}
template<class T>
@@ -197,47 +204,49 @@ inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src,
// Note for non-byte copies we can't rely on standard libc functions like memcpy(3) and memmove(3)
// in our implementation, because they may copy byte-by-byte.
- if (LIKELY(src != this) || (dst_pos < src_pos) || (dst_pos - src_pos >= count)) {
- // Forward copy ok.
+ if (LIKELY(src != this)) {
+ // Memcpy is ok here: distinct arrays are guaranteed not to overlap.
Memcpy(dst_pos, src, src_pos, count);
} else {
- // Backward copy necessary.
+ // Handle copies within the same array using a copy in the appropriate direction.
void* dst_raw = GetRawData(sizeof(T), dst_pos);
const void* src_raw = src->GetRawData(sizeof(T), src_pos);
if (sizeof(T) == sizeof(uint8_t)) {
- // TUNING: use memmove here?
uint8_t* d = reinterpret_cast<uint8_t*>(dst_raw);
const uint8_t* s = reinterpret_cast<const uint8_t*>(src_raw);
- ArrayBackwardCopy<uint8_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint16_t)) {
- uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
- const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
- ArrayBackwardCopy<uint16_t>(d, s, count);
- } else if (sizeof(T) == sizeof(uint32_t)) {
- uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
- const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
- ArrayBackwardCopy<uint32_t>(d, s, count);
+ memmove(d, s, count);
} else {
- DCHECK_EQ(sizeof(T), sizeof(uint64_t));
- uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
- const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
- ArrayBackwardCopy<uint64_t>(d, s, count);
+ const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
+ if (sizeof(T) == sizeof(uint16_t)) {
+ uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
+ const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint16_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint16_t>(d, s, count);
+ }
+ } else if (sizeof(T) == sizeof(uint32_t)) {
+ uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
+ const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint32_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint32_t>(d, s, count);
+ }
+ } else {
+ DCHECK_EQ(sizeof(T), sizeof(uint64_t));
+ uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
+ const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
+ if (copy_forward) {
+ ArrayForwardCopy<uint64_t>(d, s, count);
+ } else {
+ ArrayBackwardCopy<uint64_t>(d, s, count);
+ }
+ }
}
}
}
-// Similar to memcpy except elements are of aligned appropriately for T, count is in T sized units
-// copies are guaranteed not to tear when T is less-than 64bit.
-template<typename T>
-static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
- for (int32_t i = 0; i < count; ++i) {
- *d = *s;
- d++;
- s++;
- }
-}
-
-
template<class T>
inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
int32_t count) {
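
The rewritten Memmove only chooses a copy direction for overlapping self-copies: distinct arrays go through Memcpy, byte-sized elements go through memmove, and wider elements copy forward when dst_pos < src_pos or the regions are at least count elements apart, backward otherwise. A small stand-alone illustration of that direction test:

// Sketch only: choosing the copy direction for an overlapping self-copy so that
// source elements are never overwritten before they are read.
#include <cassert>
#include <cstdint>

template <typename T>
void OverlapSafeCopy(T* base, int32_t dst_pos, int32_t src_pos, int32_t count) {
  T* d = base + dst_pos;
  const T* s = base + src_pos;
  const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
  if (copy_forward) {
    for (int32_t i = 0; i < count; ++i) {
      d[i] = s[i];
    }
  } else {
    for (int32_t i = count - 1; i >= 0; --i) {
      d[i] = s[i];
    }
  }
}

int main() {
  uint32_t a[6] = {0, 1, 2, 3, 4, 5};
  OverlapSafeCopy(a, /*dst_pos=*/2, /*src_pos=*/0, /*count=*/4);  // overlap: backward copy needed
  assert(a[2] == 0 && a[3] == 1 && a[4] == 2 && a[5] == 3);
  return 0;
}
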
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 7740213..f91cab1 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -21,7 +21,9 @@
#include "object-inl.h"
#include "object_utils.h"
#include "runtime.h"
+#include "scoped_thread_state_change.h"
#include "utils.h"
+#include "well_known_classes.h"
namespace art {
namespace mirror {
@@ -29,6 +31,13 @@ namespace mirror {
// TODO: get global references for these
Class* ArtField::java_lang_reflect_ArtField_ = NULL;
+ArtField* ArtField::FromReflectedField(const ScopedObjectAccess& soa, jobject jlr_field) {
+ mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_reflect_Field_artField);
+ mirror::ArtField* field = f->GetObject(soa.Decode<mirror::Object*>(jlr_field))->AsArtField();
+ DCHECK(field != nullptr);
+ return field;
+}
+
void ArtField::SetClass(Class* java_lang_reflect_ArtField) {
CHECK(java_lang_reflect_ArtField_ == NULL);
CHECK(java_lang_reflect_ArtField != NULL);
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index 46287c3..0daa838 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -25,12 +25,16 @@
namespace art {
struct ArtFieldOffsets;
+class ScopedObjectAccess;
namespace mirror {
// C++ mirror of java.lang.reflect.ArtField
class MANAGED ArtField : public Object {
public:
+ static ArtField* FromReflectedField(const ScopedObjectAccess& soa, jobject jlr_field)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 7814f36..ee5a0a4 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -16,6 +16,7 @@
#include "art_method.h"
+#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stringpiece.h"
#include "class-inl.h"
@@ -28,8 +29,10 @@
#include "object-inl.h"
#include "object_array.h"
#include "object_array-inl.h"
+#include "scoped_thread_state_change.h"
#include "string.h"
#include "object_utils.h"
+#include "well_known_classes.h"
namespace art {
namespace mirror {
@@ -45,6 +48,15 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th
// TODO: get global references for these
Class* ArtMethod::java_lang_reflect_ArtMethod_ = NULL;
+ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccess& soa, jobject jlr_method) {
+ mirror::ArtField* f =
+ soa.DecodeField(WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
+ mirror::ArtMethod* method = f->GetObject(soa.Decode<mirror::Object*>(jlr_method))->AsArtMethod();
+ DCHECK(method != nullptr);
+ return method;
+}
+
+
void ArtMethod::VisitRoots(RootCallback* callback, void* arg) {
if (java_lang_reflect_ArtMethod_ != nullptr) {
callback(reinterpret_cast<mirror::Object**>(&java_lang_reflect_ArtMethod_), arg, 0,
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index c654933..fd5ac19 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -31,6 +31,7 @@ struct ConstructorMethodOffsets;
union JValue;
struct MethodClassOffsets;
class MethodHelper;
+class ScopedObjectAccess;
class StringPiece;
class ShadowFrame;
@@ -44,6 +45,9 @@ typedef void (EntryPointFromInterpreter)(Thread* self, MethodHelper& mh,
// C++ mirror of java.lang.reflect.Method and java.lang.reflect.Constructor
class MANAGED ArtMethod : public Object {
public:
+ static ArtMethod* FromReflectedMethod(const ScopedObjectAccess& soa, jobject jlr_method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index e3f4eed..89d9241 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -465,6 +465,12 @@ inline Object* Class::AllocNonMovableObject(Thread* self) {
return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
}
+template <bool kVisitClass, typename Visitor>
+inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) {
+ VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+ VisitStaticFieldsReferences<kVisitClass>(this, visitor);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 76ab94c..ddc07ff 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -861,6 +861,10 @@ class MANAGED Class : public Object {
// When class is verified, set the kAccPreverified flag on each method.
void SetPreverifiedFlagOnAllMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <bool kVisitClass, typename Visitor>
+ void VisitReferences(mirror::Class* klass, const Visitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+
private:
void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index cad1017..527b8a6 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -557,6 +557,77 @@ inline bool Object::CasFieldObject(MemberOffset field_offset, Object* old_value,
return success;
}
+template<bool kVisitClass, bool kIsStatic, typename Visitor>
+inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) {
+ if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
+ if (!kVisitClass) {
+ // Mask out the class from the reference offsets.
+ ref_offsets ^= kWordHighBitMask;
+ }
+ DCHECK_EQ(ClassOffset().Uint32Value(), 0U);
+ // Found a reference offset bitmap. Visit the specified offsets.
+ while (ref_offsets != 0) {
+ size_t right_shift = CLZ(ref_offsets);
+ MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+ visitor(this, field_offset, kIsStatic);
+ ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+ }
+ } else {
+ // There is no reference offset bitmap. In the non-static case, walk up the class
+ // inheritance hierarchy and find reference offsets the hard way. In the static case, just
+ // consider this class.
+ for (mirror::Class* klass = kIsStatic ? AsClass() : GetClass(); klass != nullptr;
+ klass = kIsStatic ? nullptr : klass->GetSuperClass()) {
+ size_t num_reference_fields =
+ kIsStatic ? klass->NumReferenceStaticFields() : klass->NumReferenceInstanceFields();
+ for (size_t i = 0; i < num_reference_fields; ++i) {
+ mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i)
+ : klass->GetInstanceField(i);
+ MemberOffset field_offset = field->GetOffset();
+ // TODO: Do a simpler check?
+ if (!kVisitClass && UNLIKELY(field_offset.Uint32Value() == ClassOffset().Uint32Value())) {
+ continue;
+ }
+ visitor(this, field_offset, kIsStatic);
+ }
+ }
+ }
+}
+
+template<bool kVisitClass, typename Visitor>
+inline void Object::VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+ VisitFieldsReferences<kVisitClass, false>(
+ klass->GetReferenceInstanceOffsets<kVerifyNone>(), visitor);
+}
+
+template<bool kVisitClass, typename Visitor>
+inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) {
+ klass->VisitFieldsReferences<kVisitClass, true>(
+ klass->GetReferenceStaticOffsets<kVerifyNone>(), visitor);
+}
+
+template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor,
+ typename JavaLangRefVisitor>
+inline void Object::VisitReferences(const Visitor& visitor,
+ const JavaLangRefVisitor& ref_visitor) {
+ mirror::Class* klass = GetClass<kVerifyFlags>();
+ if (UNLIKELY(klass == Class::GetJavaLangClass())) {
+ DCHECK_EQ(klass->GetClass<kVerifyNone>(), Class::GetJavaLangClass());
+ AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
+ } else if (UNLIKELY(klass->IsArrayClass<kVerifyFlags>())) {
+ if (klass->IsObjectArrayClass<kVerifyNone>()) {
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
+ } else if (kVisitClass) {
+ visitor(this, ClassOffset(), false);
+ }
+ } else {
+ VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
+ if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
+ ref_visitor(klass, AsReference());
+ }
+ }
+}
+
} // namespace mirror
} // namespace art
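
The fast path of VisitFieldsReferences above walks a 32-bit reference-offset bitmap: each set bit, located with CLZ, names one reference field, and the corresponding high-order bit is cleared before continuing; only when no bitmap exists does it walk the class hierarchy field by field. A minimal stand-alone version of the bit-walking loop, with the ART-specific CLASS_OFFSET_FROM_CLZ mapping replaced by a hypothetical offset function (assumes a GCC/Clang-style __builtin_clz, which ART's CLZ macro wraps):

// Sketch only: visiting every set bit of a reference-offset bitmap, highest bit first.
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for CLASS_OFFSET_FROM_CLZ: map a bit position to a byte offset.
static uint32_t OffsetFromClz(uint32_t clz) { return clz * sizeof(uint32_t); }

template <typename Visitor>
void VisitBitmapOffsets(uint32_t ref_offsets, const Visitor& visitor) {
  while (ref_offsets != 0) {
    uint32_t right_shift = __builtin_clz(ref_offsets);  // position of the highest set bit
    visitor(OffsetFromClz(right_shift));                 // visit the field at that offset
    ref_offsets &= ~(0x80000000u >> right_shift);        // clear the bit and continue
  }
}

int main() {
  // The two highest-order set bits have CLZ 0 and 2: offsets 0 and 8 with this mapping.
  VisitBitmapOffsets(0xA0000000u, [](uint32_t offset) {
    std::printf("field at offset %u\n", static_cast<unsigned>(offset));
  });
  return 0;
}
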
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 476259f..0a77828 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -240,6 +240,14 @@ class MANAGED LOCKABLE Object {
#endif
}
+ // TODO: fix the thread safety analysis broken by the use of templates. This should be
+ // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ typename Visitor, typename JavaLangRefVisitor = VoidFunctor>
+ void VisitReferences(const Visitor& visitor,
+ const JavaLangRefVisitor& ref_visitor = VoidFunctor())
+ NO_THREAD_SAFETY_ANALYSIS;
+
protected:
// Accessors for non-Java type fields
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -251,6 +259,17 @@ class MANAGED LOCKABLE Object {
#endif
}
+ // TODO: Fix this when annotalysis works with visitors.
+ template<bool kVisitClass, bool kIsStatic, typename Visitor>
+ void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+ template<bool kVisitClass, typename Visitor>
+ void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<bool kVisitClass, typename Visitor>
+ void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
// Verify the type correctness of stores to fields.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index a427957..8032cc3 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -233,6 +233,17 @@ inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
(i * sizeof(HeapReference<Object>)));
}
+template<class T> template<const bool kVisitClass, typename Visitor>
+void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
+ if (kVisitClass) {
+ visitor(this, ClassOffset(), false);
+ }
+ const size_t length = static_cast<size_t>(GetLength());
+ for (size_t i = 0; i < length; ++i) {
+ visitor(this, OffsetOfElement(i), false);
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 7f9e716..5ff0490 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -78,6 +78,11 @@ class MANAGED ObjectArray : public Array {
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // TODO: fix the thread safety analysis broken by the use of templates. This should be
+ // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ template<const bool kVisitClass, typename Visitor>
+ void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
private:
static MemberOffset OffsetOfElement(int32_t i);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 3f35210..d4f11b2 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -138,9 +138,7 @@ String* String::AllocFromUtf16(Thread* self,
}
String* String::AllocFromModifiedUtf8(Thread* self, const char* utf) {
- if (UNLIKELY(utf == nullptr)) {
- return nullptr;
- }
+ DCHECK(utf != nullptr);
size_t char_count = CountModifiedUtf8Chars(utf);
return AllocFromModifiedUtf8(self, char_count, utf);
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a22d7ca..2445b53 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -34,12 +34,10 @@ namespace art {
* check. We can also safely assume the constructor isn't associated
* with an interface, array, or primitive class.
*/
-static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
+static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs,
+ jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- jobject art_method = soa.Env()->GetObjectField(
- javaMethod, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
-
- mirror::ArtMethod* m = soa.Decode<mirror::Object*>(art_method)->AsArtMethod();
+ mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
SirtRef<mirror::Class> c(soa.Self(), m->GetDeclaringClass());
if (UNLIKELY(c->IsAbstract())) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
@@ -70,14 +68,14 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
}
jobject javaReceiver = soa.AddLocalReference<jobject>(receiver);
- InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
+ InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
// Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
return javaReceiver;
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;Z)Ljava/lang/Object;"),
};
void register_java_lang_reflect_Constructor(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 7e21d6c..ce622d9 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -20,6 +20,7 @@
#include "dex_file-inl.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
+#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "object_utils.h"
#include "reflection.h"
@@ -27,161 +28,193 @@
namespace art {
-static bool GetFieldValue(const ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, JValue& value, bool allow_references)
+static bool VerifyFieldAccess(mirror::ArtField* field, mirror::Object* obj, bool is_set)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_EQ(value.GetJ(), INT64_C(0));
- CHECK(!kMovingFields);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), o);
- SirtRef<mirror::Class> sirt_klass(soa.Self(), f->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+ if (field->IsFinal() && is_set) {
+ ThrowIllegalAccessException(nullptr, StringPrintf("Cannot set final field: %s",
+ PrettyField(field).c_str()).c_str());
return false;
}
- o = sirt_obj.get();
- switch (FieldHelper(f).GetTypeAsPrimitiveType()) {
- case Primitive::kPrimBoolean:
- value.SetZ(f->GetBoolean(o));
- return true;
- case Primitive::kPrimByte:
- value.SetB(f->GetByte(o));
- return true;
- case Primitive::kPrimChar:
- value.SetC(f->GetChar(o));
- return true;
- case Primitive::kPrimDouble:
- value.SetD(f->GetDouble(o));
- return true;
- case Primitive::kPrimFloat:
- value.SetF(f->GetFloat(o));
- return true;
- case Primitive::kPrimInt:
- value.SetI(f->GetInt(o));
- return true;
- case Primitive::kPrimLong:
- value.SetJ(f->GetLong(o));
- return true;
- case Primitive::kPrimShort:
- value.SetS(f->GetShort(o));
- return true;
- case Primitive::kPrimNot:
- if (allow_references) {
- value.SetL(f->GetObject(o));
+ if (!VerifyAccess(obj, field->GetDeclaringClass(), field->GetAccessFlags())) {
+ ThrowIllegalAccessException(nullptr, StringPrintf("Cannot access field: %s",
+ PrettyField(field).c_str()).c_str());
+ return false;
+ }
+ return true;
+}
+
+static bool GetFieldValue(const ScopedFastNativeObjectAccess& soa, mirror::Object* o,
+ mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
+ JValue* value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_EQ(value->GetJ(), INT64_C(0));
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ value->SetZ(f->GetBoolean(o));
return true;
- }
- // Else break to report an error.
- break;
- case Primitive::kPrimVoid:
- // Never okay.
- break;
+ case Primitive::kPrimByte:
+ value->SetB(f->GetByte(o));
+ return true;
+ case Primitive::kPrimChar:
+ value->SetC(f->GetChar(o));
+ return true;
+ case Primitive::kPrimDouble:
+ value->SetD(f->GetDouble(o));
+ return true;
+ case Primitive::kPrimFloat:
+ value->SetF(f->GetFloat(o));
+ return true;
+ case Primitive::kPrimInt:
+ value->SetI(f->GetInt(o));
+ return true;
+ case Primitive::kPrimLong:
+ value->SetJ(f->GetLong(o));
+ return true;
+ case Primitive::kPrimShort:
+ value->SetS(f->GetShort(o));
+ return true;
+ case Primitive::kPrimNot:
+ if (allow_references) {
+ value->SetL(f->GetObject(o));
+ return true;
+ }
+ // Else break to report an error.
+ break;
+ case Primitive::kPrimVoid:
+ // Never okay.
+ break;
}
- ThrowIllegalArgumentException(NULL,
- StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return false;
}
static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa, jobject j_rcvr,
- mirror::ArtField* f, mirror::Object*& class_or_rcvr)
+ mirror::ArtField* f, mirror::Object** class_or_rcvr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ soa.Self()->AssertThreadSuspensionIsAllowable();
if (f->IsStatic()) {
- class_or_rcvr = f->GetDeclaringClass();
+ SirtRef<mirror::Class> sirt_klass(soa.Self(), f->GetDeclaringClass());
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true))) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ *class_or_rcvr = nullptr;
+ return false;
+ }
+ *class_or_rcvr = sirt_klass.get();
return true;
}
- class_or_rcvr = soa.Decode<mirror::Object*>(j_rcvr);
+ *class_or_rcvr = soa.Decode<mirror::Object*>(j_rcvr);
mirror::Class* declaringClass = f->GetDeclaringClass();
- if (!VerifyObjectIsClass(class_or_rcvr, declaringClass)) {
+ if (!VerifyObjectIsClass(*class_or_rcvr, declaringClass)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ *class_or_rcvr = nullptr;
return false;
}
return true;
}
-static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj) {
+static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(env->FromReflectedField(javaField));
- mirror::Object* o = NULL;
- if (!CheckReceiver(soa, javaObj, f, o)) {
- return NULL;
+ CHECK(!kMovingFields) << "CheckReceiver may trigger thread suspension for initialization";
+ mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Object* o = nullptr;
+ if (!CheckReceiver(soa, javaObj, f, &o)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
}
-
+ // If the field is not set to be accessible, verify it can be accessed by the caller.
+ if ((accessible == JNI_FALSE) && !VerifyFieldAccess(f, o, false)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
+ }
+ // From this point on, we don't expect suspension unless an exception is thrown.
// Get the field's value, boxing if necessary.
+ Primitive::Type field_type = FieldHelper(f).GetTypeAsPrimitiveType();
JValue value;
- if (!GetFieldValue(soa, o, f, value, true)) {
- return NULL;
+ if (!GetFieldValue(soa, o, f, field_type, true, &value)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
}
- return
- soa.AddLocalReference<jobject>(BoxPrimitive(FieldHelper(f).GetTypeAsPrimitiveType(), value));
+ return soa.AddLocalReference<jobject>(BoxPrimitive(field_type, value));
}
static JValue GetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj,
- char dst_descriptor) {
+ char dst_descriptor, jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(env->FromReflectedField(javaField));
- mirror::Object* o = NULL;
- if (!CheckReceiver(soa, javaObj, f, o)) {
+ CHECK(!kMovingFields) << "CheckReceiver may trigger thread suspension for initialization";
+ mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Object* o = nullptr;
+ if (!CheckReceiver(soa, javaObj, f, &o)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
+ // If the field is not set to be accessible, verify it can be accessed by the caller.
+ if ((accessible == JNI_FALSE) && !VerifyFieldAccess(f, o, false)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return JValue();
+ }
+
+ // From this point on, we don't expect suspension unless an exception is thrown.
// Read the value.
+ Primitive::Type field_type = FieldHelper(f).GetTypeAsPrimitiveType();
JValue field_value;
- if (!GetFieldValue(soa, o, f, field_value, false)) {
+ if (!GetFieldValue(soa, o, f, field_type, false, &field_value)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
// Widen it if necessary (and possible).
JValue wide_value;
- mirror::Class* dst_type =
- Runtime::Current()->GetClassLinker()->FindPrimitiveClass(dst_descriptor);
- if (!ConvertPrimitiveValue(NULL, false, FieldHelper(f).GetTypeAsPrimitiveType(),
- dst_type->GetPrimitiveType(), field_value, wide_value)) {
+ if (!ConvertPrimitiveValue(NULL, false, field_type, Primitive::GetType(dst_descriptor),
+ field_value, &wide_value)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
return wide_value;
}
-static jboolean Field_getBoolean(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'Z').GetZ();
+static jboolean Field_getBoolean(JNIEnv* env, jobject javaField, jobject javaObj,
+ jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'Z', accessible).GetZ();
}
-static jbyte Field_getByte(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'B').GetB();
+static jbyte Field_getByte(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'B', accessible).GetB();
}
-static jchar Field_getChar(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'C').GetC();
+static jchar Field_getChar(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'C', accessible).GetC();
}
-static jdouble Field_getDouble(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'D').GetD();
+static jdouble Field_getDouble(JNIEnv* env, jobject javaField, jobject javaObj,
+ jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'D', accessible).GetD();
}
-static jfloat Field_getFloat(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'F').GetF();
+static jfloat Field_getFloat(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'F', accessible).GetF();
}
-static jint Field_getInt(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'I').GetI();
+static jint Field_getInt(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'I', accessible).GetI();
}
-static jlong Field_getLong(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'J').GetJ();
+static jlong Field_getLong(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'J', accessible).GetJ();
}
-static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) {
- return GetPrimitiveField(env, javaField, javaObj, 'S').GetS();
+static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj, jboolean accessible) {
+ return GetPrimitiveField(env, javaField, javaObj, 'S', accessible).GetS();
}
static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, const JValue& new_value, bool allow_references)
+ mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
+ const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(!kMovingFields);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), o);
- SirtRef<mirror::Class> sirt_klass(soa.Self(), f->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
- return;
- }
- o = sirt_obj.get();
- switch (FieldHelper(f).GetTypeAsPrimitiveType()) {
+ DCHECK(f->GetDeclaringClass()->IsInitialized());
+ switch (field_type) {
case Primitive::kPrimBoolean:
f->SetBoolean<false>(o, new_value.GetZ());
break;
@@ -214,132 +247,166 @@ static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
// Else fall through to report an error.
case Primitive::kPrimVoid:
// Never okay.
- ThrowIllegalArgumentException(NULL, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
-
- // Special handling for final fields on SMP systems.
- // We need a store/store barrier here (JMM requirement).
- if (f->IsFinal()) {
- QuasiAtomic::MembarStoreLoad();
- }
}
-static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue) {
+static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject javaValue,
+ jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(env->FromReflectedField(javaField));
-
+ CHECK(!kMovingFields) << "CheckReceiver may trigger thread suspension for initialization";
+ mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ // Check that the receiver is non-null and an instance of the field's declaring class.
+ mirror::Object* o = nullptr;
+ if (!CheckReceiver(soa, javaObj, f, &o)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return;
+ }
+ Primitive::Type field_prim_type;
+ mirror::Class* field_type;
+ {
+ FieldHelper fh(f);
+ const char* field_type_descriptor = fh.GetTypeDescriptor();
+ field_prim_type = Primitive::GetType(field_type_descriptor[0]);
+ if (field_prim_type == Primitive::kPrimNot) {
+ SirtRef<mirror::Object> sirt_obj(soa.Self(), o);
+ // May cause resolution.
+ CHECK(!kMovingFields) << "Resolution may trigger thread suspension";
+ field_type = fh.GetType(true);
+ if (field_type == nullptr) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return;
+ }
+ } else {
+ field_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(field_type_descriptor[0]);
+ }
+ }
+ // From this point on, we don't expect suspension unless an exception is thrown.
// Unbox the value, if necessary.
mirror::Object* boxed_value = soa.Decode<mirror::Object*>(javaValue);
JValue unboxed_value;
- if (!UnboxPrimitiveForField(boxed_value, FieldHelper(f).GetType(), unboxed_value, f)) {
+ if (!UnboxPrimitiveForField(boxed_value, field_type, f, &unboxed_value)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return;
}
-
- // Check that the receiver is non-null and an instance of the field's declaring class.
- mirror::Object* o = NULL;
- if (!CheckReceiver(soa, javaObj, f, o)) {
+ // If the field is not set to be accessible, verify it can be accessed by the caller.
+ if ((accessible == JNI_FALSE) && !VerifyFieldAccess(f, o, true)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return;
}
-
- SetFieldValue(soa, o, f, unboxed_value, true);
+ SetFieldValue(soa, o, f, field_prim_type, true, unboxed_value);
}
static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj, char src_descriptor,
- const JValue& new_value) {
+ const JValue& new_value, jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtField* f = soa.DecodeField(env->FromReflectedField(javaField));
- mirror::Object* o = NULL;
- if (!CheckReceiver(soa, javaObj, f, o)) {
+ mirror::ArtField* f = mirror::ArtField::FromReflectedField(soa, javaField);
+ mirror::Object* o = nullptr;
+ if (!CheckReceiver(soa, javaObj, f, &o)) {
return;
}
- FieldHelper fh(f);
- if (!fh.IsPrimitiveType()) {
- ThrowIllegalArgumentException(NULL, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ Primitive::Type field_type = FieldHelper(f).GetTypeAsPrimitiveType();
+ if (UNLIKELY(field_type == Primitive::kPrimNot)) {
+ ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
// Widen the value if necessary (and possible).
JValue wide_value;
- mirror::Class* src_type = Runtime::Current()->GetClassLinker()->FindPrimitiveClass(src_descriptor);
- if (!ConvertPrimitiveValue(NULL, false, src_type->GetPrimitiveType(), fh.GetTypeAsPrimitiveType(),
- new_value, wide_value)) {
+ if (!ConvertPrimitiveValue(nullptr, false, Primitive::GetType(src_descriptor),
+ field_type, new_value, &wide_value)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return;
+ }
+
+ // If the field is not set to be accessible, verify it can be accessed by the caller.
+ if ((accessible == JNI_FALSE) && !VerifyFieldAccess(f, o, true)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return;
}
// Write the value.
- SetFieldValue(soa, o, f, wide_value, false);
+ SetFieldValue(soa, o, f, field_type, false, wide_value);
}
-static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z) {
+static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
+ jboolean accessible) {
JValue value;
value.SetZ(z);
- SetPrimitiveField(env, javaField, javaObj, 'Z', value);
+ SetPrimitiveField(env, javaField, javaObj, 'Z', value, accessible);
}
-static void Field_setByte(JNIEnv* env, jobject javaField, jobject javaObj, jbyte b) {
+static void Field_setByte(JNIEnv* env, jobject javaField, jobject javaObj, jbyte b,
+ jboolean accessible) {
JValue value;
value.SetB(b);
- SetPrimitiveField(env, javaField, javaObj, 'B', value);
+ SetPrimitiveField(env, javaField, javaObj, 'B', value, accessible);
}
-static void Field_setChar(JNIEnv* env, jobject javaField, jobject javaObj, jchar c) {
+static void Field_setChar(JNIEnv* env, jobject javaField, jobject javaObj, jchar c,
+ jboolean accessible) {
JValue value;
value.SetC(c);
- SetPrimitiveField(env, javaField, javaObj, 'C', value);
+ SetPrimitiveField(env, javaField, javaObj, 'C', value, accessible);
}
-static void Field_setDouble(JNIEnv* env, jobject javaField, jobject javaObj, jdouble d) {
+static void Field_setDouble(JNIEnv* env, jobject javaField, jobject javaObj, jdouble d,
+ jboolean accessible) {
JValue value;
value.SetD(d);
- SetPrimitiveField(env, javaField, javaObj, 'D', value);
+ SetPrimitiveField(env, javaField, javaObj, 'D', value, accessible);
}
-static void Field_setFloat(JNIEnv* env, jobject javaField, jobject javaObj, jfloat f) {
+static void Field_setFloat(JNIEnv* env, jobject javaField, jobject javaObj, jfloat f,
+ jboolean accessible) {
JValue value;
value.SetF(f);
- SetPrimitiveField(env, javaField, javaObj, 'F', value);
+ SetPrimitiveField(env, javaField, javaObj, 'F', value, accessible);
}
-static void Field_setInt(JNIEnv* env, jobject javaField, jobject javaObj, jint i) {
+static void Field_setInt(JNIEnv* env, jobject javaField, jobject javaObj, jint i,
+ jboolean accessible) {
JValue value;
value.SetI(i);
- SetPrimitiveField(env, javaField, javaObj, 'I', value);
+ SetPrimitiveField(env, javaField, javaObj, 'I', value, accessible);
}
-static void Field_setLong(JNIEnv* env, jobject javaField, jobject javaObj, jlong j) {
+static void Field_setLong(JNIEnv* env, jobject javaField, jobject javaObj, jlong j,
+ jboolean accessible) {
JValue value;
value.SetJ(j);
- SetPrimitiveField(env, javaField, javaObj, 'J', value);
+ SetPrimitiveField(env, javaField, javaObj, 'J', value, accessible);
}
-static void Field_setShort(JNIEnv* env, jobject javaField, jobject javaObj, jshort s) {
+static void Field_setShort(JNIEnv* env, jobject javaField, jobject javaObj, jshort s,
+ jboolean accessible) {
JValue value;
value.SetS(s);
- SetPrimitiveField(env, javaField, javaObj, 'S', value);
+ SetPrimitiveField(env, javaField, javaObj, 'S', value, accessible);
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Field, get, "!(Ljava/lang/Object;)Ljava/lang/Object;"),
- NATIVE_METHOD(Field, getBoolean, "!(Ljava/lang/Object;)Z"),
- NATIVE_METHOD(Field, getByte, "!(Ljava/lang/Object;)B"),
- NATIVE_METHOD(Field, getChar, "!(Ljava/lang/Object;)C"),
- NATIVE_METHOD(Field, getDouble, "!(Ljava/lang/Object;)D"),
- NATIVE_METHOD(Field, getFloat, "!(Ljava/lang/Object;)F"),
- NATIVE_METHOD(Field, getInt, "!(Ljava/lang/Object;)I"),
- NATIVE_METHOD(Field, getLong, "!(Ljava/lang/Object;)J"),
- NATIVE_METHOD(Field, getShort, "!(Ljava/lang/Object;)S"),
- NATIVE_METHOD(Field, set, "!(Ljava/lang/Object;Ljava/lang/Object;)V"),
- NATIVE_METHOD(Field, setBoolean, "!(Ljava/lang/Object;Z)V"),
- NATIVE_METHOD(Field, setByte, "!(Ljava/lang/Object;B)V"),
- NATIVE_METHOD(Field, setChar, "!(Ljava/lang/Object;C)V"),
- NATIVE_METHOD(Field, setDouble, "!(Ljava/lang/Object;D)V"),
- NATIVE_METHOD(Field, setFloat, "!(Ljava/lang/Object;F)V"),
- NATIVE_METHOD(Field, setInt, "!(Ljava/lang/Object;I)V"),
- NATIVE_METHOD(Field, setLong, "!(Ljava/lang/Object;J)V"),
- NATIVE_METHOD(Field, setShort, "!(Ljava/lang/Object;S)V"),
+ NATIVE_METHOD(Field, get, "!(Ljava/lang/Object;Z)Ljava/lang/Object;"),
+ NATIVE_METHOD(Field, getBoolean, "!(Ljava/lang/Object;Z)Z"),
+ NATIVE_METHOD(Field, getByte, "!(Ljava/lang/Object;Z)B"),
+ NATIVE_METHOD(Field, getChar, "!(Ljava/lang/Object;Z)C"),
+ NATIVE_METHOD(Field, getDouble, "!(Ljava/lang/Object;Z)D"),
+ NATIVE_METHOD(Field, getFloat, "!(Ljava/lang/Object;Z)F"),
+ NATIVE_METHOD(Field, getInt, "!(Ljava/lang/Object;Z)I"),
+ NATIVE_METHOD(Field, getLong, "!(Ljava/lang/Object;Z)J"),
+ NATIVE_METHOD(Field, getShort, "!(Ljava/lang/Object;Z)S"),
+ NATIVE_METHOD(Field, set, "!(Ljava/lang/Object;Ljava/lang/Object;Z)V"),
+ NATIVE_METHOD(Field, setBoolean, "!(Ljava/lang/Object;ZZ)V"),
+ NATIVE_METHOD(Field, setByte, "!(Ljava/lang/Object;BZ)V"),
+ NATIVE_METHOD(Field, setChar, "!(Ljava/lang/Object;CZ)V"),
+ NATIVE_METHOD(Field, setDouble, "!(Ljava/lang/Object;DZ)V"),
+ NATIVE_METHOD(Field, setFloat, "!(Ljava/lang/Object;FZ)V"),
+ NATIVE_METHOD(Field, setInt, "!(Ljava/lang/Object;IZ)V"),
+ NATIVE_METHOD(Field, setLong, "!(Ljava/lang/Object;JZ)V"),
+ NATIVE_METHOD(Field, setShort, "!(Ljava/lang/Object;SZ)V"),
};
void register_java_lang_reflect_Field(JNIEnv* env) {
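
Each Field native above now takes the accessible flag passed down from the Java side, so the access check only runs when setAccessible(true) was not used. A compact sketch of that guard pattern, with hypothetical placeholder types instead of ART's mirror classes:

// Sketch only: "check access unless the caller already made the member accessible",
// as in Field_get / Field_set above, using hypothetical stand-in types.
#include <string>

struct FakeField {
  bool is_final = false;
  bool caller_can_access = true;  // stands in for VerifyAccess()
  std::string name = "SomeClass.someField";
};

static bool VerifyFieldAccess(const FakeField& field, bool is_set, std::string* error) {
  if (is_set && field.is_final) {
    *error = "Cannot set final field: " + field.name;
    return false;
  }
  if (!field.caller_can_access) {
    *error = "Cannot access field: " + field.name;
    return false;
  }
  return true;
}

// accessible == true models Field.setAccessible(true): the check is skipped entirely.
static bool SetFieldChecked(const FakeField& field, bool accessible, std::string* error) {
  if (!accessible && !VerifyFieldAccess(field, /*is_set=*/true, error)) {
    return false;  // the real code leaves an IllegalAccessException pending here
  }
  // ... write the value ...
  return true;
}

int main() {
  FakeField f;
  f.is_final = true;
  std::string error;
  bool checked = SetFieldChecked(f, /*accessible=*/false, &error);  // fails: final field
  bool skipped = SetFieldChecked(f, /*accessible=*/true, &error);   // check skipped
  return (!checked && skipped) ? 0 : 1;
}
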
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 0b8bb7b..22e81e4 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -29,18 +29,15 @@
namespace art {
-static jobject Method_invoke(JNIEnv* env,
- jobject javaMethod, jobject javaReceiver, jobject javaArgs) {
+static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
+ jobject javaArgs, jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
- return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
+ return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
}
static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
ScopedFastNativeObjectAccess soa(env);
- jobject art_method = soa.Env()->GetObjectField(
- javaMethod, WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
-
- mirror::ArtMethod* proxy_method = soa.Decode<mirror::Object*>(art_method)->AsArtMethod();
+ mirror::ArtMethod* proxy_method = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
CHECK(proxy_method->GetDeclaringClass()->IsProxyClass());
mirror::SynthesizedProxyClass* proxy_class =
down_cast<mirror::SynthesizedProxyClass*>(proxy_method->GetDeclaringClass());
@@ -59,7 +56,7 @@ static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;Z)Ljava/lang/Object;"),
NATIVE_METHOD(Method, getExceptionTypesNative, "!()[Ljava/lang/Class;"),
};
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 1b9ebe4..a7ca0b8 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -44,23 +44,31 @@ static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) {
* NULL on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
- // Suspend thread to build stack trace.
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
jobjectArray trace = nullptr;
- bool timed_out;
- Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
- if (thread != NULL) {
- {
- ScopedObjectAccess soa(env);
- jobject internal_trace = thread->CreateInternalStackTrace(soa);
- trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
- }
- // Restart suspended thread.
- thread_list->Resume(thread, false);
+ Thread* const self = Thread::Current();
+ if (static_cast<uint32_t>(thin_lock_id) == self->GetThreadId()) {
+ // No need to suspend ourselves to build the stack trace.
+ ScopedObjectAccess soa(env);
+ jobject internal_trace = self->CreateInternalStackTrace(soa);
+ trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
} else {
- if (timed_out) {
- LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
- "within a generous timeout.";
+ // Suspend thread to build stack trace.
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ bool timed_out;
+ Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ if (thread != nullptr) {
+ {
+ ScopedObjectAccess soa(env);
+ jobject internal_trace = thread->CreateInternalStackTrace(soa);
+ trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
+ }
+ // Restart suspended thread.
+ thread_list->Resume(thread, false);
+ } else {
+ if (timed_out) {
+ LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
+ "within a generous timeout.";
+ }
}
}
return trace;
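
getStackTraceById now special-cases the current thread: its own stack can be walked directly, while any other thread is still suspended, sampled, and resumed as before. A rough outline of that branch, with hypothetical Thread/ThreadList stand-ins:

// Sketch only: pick the self fast path versus suspend/sample/resume.
#include <cstdint>
#include <vector>

struct FakeThread {
  uint32_t id;
  // Stands in for CreateInternalStackTrace + conversion to StackTraceElement[].
  std::vector<uintptr_t> SampleStack() const { return {}; }
};

struct FakeThreadList {
  FakeThread* SuspendById(uint32_t /*id*/, bool* timed_out) { *timed_out = false; return nullptr; }
  void Resume(FakeThread* /*t*/) {}
};

static std::vector<uintptr_t> GetStackTraceById(FakeThread* self, FakeThreadList* list,
                                                uint32_t thin_lock_id) {
  if (thin_lock_id == self->id) {
    // Current thread: walk its own stack directly, no suspension needed.
    return self->SampleStack();
  }
  bool timed_out = false;
  FakeThread* thread = list->SuspendById(thin_lock_id, &timed_out);
  if (thread == nullptr) {
    return {};  // the real code logs an error when the suspend timed out
  }
  std::vector<uintptr_t> trace = thread->SampleStack();
  list->Resume(thread);
  return trace;
}

int main() {
  FakeThread self{1};
  FakeThreadList list;
  return GetStackTraceById(&self, &list, 1).empty() ? 0 : 1;
}
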
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 468ba08..89ee34e 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -25,6 +25,7 @@
namespace art {
namespace mirror {
class Object;
+template<class MirrorType> class HeapReference;
} // namespace mirror
class StackVisitor;
@@ -56,6 +57,9 @@ typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor);
+
+typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
+
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
diff --git a/runtime/offsets.h b/runtime/offsets.h
index e2dba9d..ed4e49e 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -32,6 +32,10 @@ class Offset {
uint32_t Uint32Value() const {
return static_cast<uint32_t>(val_);
}
+ size_t SizeValue() const {
+ return val_;
+ }
+
protected:
size_t val_;
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 5717689..e2086f1 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -107,6 +107,8 @@ static gc::CollectorType ParseCollectorType(const std::string& option) {
return gc::kCollectorTypeSS;
} else if (option == "GSS") {
return gc::kCollectorTypeGSS;
+ } else if (option == "CC") {
+ return gc::kCollectorTypeCC;
} else {
return gc::kCollectorTypeNone;
}
@@ -203,11 +205,13 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
std::string checks(buf);
std::vector<std::string> checkvec;
Split(checks, ',', checkvec);
+ explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
+ kExplicitStackOverflowCheck;
for (auto& str : checkvec) {
std::string val = Trim(str);
if (val == "none") {
explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
+ kExplicitStackOverflowCheck;
} else if (val == "null") {
explicit_checks_ &= ~kExplicitNullCheck;
} else if (val == "suspend") {
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 4388d31..a9072d8 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -80,8 +80,15 @@ COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) ==
COMPILE_ASSERT(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), check_iget_iput_short_variant);
+// This is used by the compiler and the debugger. We look into the dex cache for resolved methods
+// and fields. However, in the context of the debugger, not all methods and fields are resolved.
+// Since we need to be able to detect possibly inlined methods, we pass a null inline method to
+// indicate we don't want to take unresolved methods and fields into account during analysis.
bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
InlineMethod* method) {
+ DCHECK(verifier != nullptr);
+ DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
+ DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
// We currently support only plain return or 2-instruction methods.
const DexFile::CodeItem* code_item = verifier->CodeItem();
@@ -91,9 +98,11 @@ bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
switch (opcode) {
case Instruction::RETURN_VOID:
- method->opcode = kInlineOpNop;
- method->flags = kInlineSpecial;
- method->d.data = 0u;
+ if (method != nullptr) {
+ method->opcode = kInlineOpNop;
+ method->flags = kInlineSpecial;
+ method->d.data = 0u;
+ }
return true;
case Instruction::RETURN:
case Instruction::RETURN_OBJECT:
@@ -136,14 +145,16 @@ bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_ite
DCHECK_LT((return_opcode == Instruction::RETURN_WIDE) ? reg + 1 : reg,
code_item->registers_size_);
- result->opcode = kInlineOpReturnArg;
- result->flags = kInlineSpecial;
- InlineReturnArgData* data = &result->d.return_data;
- data->arg = reg - arg_start;
- data->is_wide = (return_opcode == Instruction::RETURN_WIDE) ? 1u : 0u;
- data->is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 1u : 0u;
- data->reserved = 0u;
- data->reserved2 = 0u;
+ if (result != nullptr) {
+ result->opcode = kInlineOpReturnArg;
+ result->flags = kInlineSpecial;
+ InlineReturnArgData* data = &result->d.return_data;
+ data->arg = reg - arg_start;
+ data->is_wide = (return_opcode == Instruction::RETURN_WIDE) ? 1u : 0u;
+ data->is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 1u : 0u;
+ data->reserved = 0u;
+ data->reserved2 = 0u;
+ }
return true;
}
@@ -173,9 +184,11 @@ bool InlineMethodAnalyser::AnalyseConstMethod(const DexFile::CodeItem* code_item
if (return_opcode == Instruction::RETURN_OBJECT && vB != 0) {
return false; // Returning non-null reference constant?
}
- result->opcode = kInlineOpNonWideConst;
- result->flags = kInlineSpecial;
- result->d.data = static_cast<uint64_t>(vB);
+ if (result != nullptr) {
+ result->opcode = kInlineOpNonWideConst;
+ result->flags = kInlineSpecial;
+ result->d.data = static_cast<uint64_t>(vB);
+ }
return true;
}
@@ -215,18 +228,19 @@ bool InlineMethodAnalyser::AnalyseIGetMethod(verifier::MethodVerifier* verifier,
return false;
}
- if (!ComputeSpecialAccessorInfo(field_idx, false, verifier, &result->d.ifield_data)) {
- return false;
+ if (result != nullptr) {
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ if (!ComputeSpecialAccessorInfo(field_idx, false, verifier, data)) {
+ return false;
+ }
+ result->opcode = kInlineOpIGet;
+ result->flags = kInlineSpecial;
+ data->op_variant = IGetVariant(opcode);
+ data->object_arg = object_reg - arg_start; // Allow IGET on any register, not just "this".
+ data->src_arg = 0;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
+ data->reserved = 0;
}
-
- result->opcode = kInlineOpIGet;
- result->flags = kInlineSpecial;
- InlineIGetIPutData* data = &result->d.ifield_data;
- data->op_variant = IGetVariant(opcode);
- data->object_arg = object_reg - arg_start; // Allow IGET on any register, not just "this".
- data->src_arg = 0;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
return true;
}
@@ -262,18 +276,19 @@ bool InlineMethodAnalyser::AnalyseIPutMethod(verifier::MethodVerifier* verifier,
return false;
}
- if (!ComputeSpecialAccessorInfo(field_idx, true, verifier, &result->d.ifield_data)) {
- return false;
+ if (result != nullptr) {
+ InlineIGetIPutData* data = &result->d.ifield_data;
+ if (!ComputeSpecialAccessorInfo(field_idx, true, verifier, data)) {
+ return false;
+ }
+ result->opcode = kInlineOpIPut;
+ result->flags = kInlineSpecial;
+ data->op_variant = IPutVariant(opcode);
+ data->object_arg = object_reg - arg_start; // Allow IPUT on any register, not just "this".
+ data->src_arg = src_reg - arg_start;
+ data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
+ data->reserved = 0;
}
-
- result->opcode = kInlineOpIPut;
- result->flags = kInlineSpecial;
- InlineIGetIPutData* data = &result->d.ifield_data;
- data->op_variant = IPutVariant(opcode);
- data->object_arg = object_reg - arg_start; // Allow IPUT on any register, not just "this".
- data->src_arg = src_reg - arg_start;
- data->method_is_static = (verifier->GetAccessFlags() & kAccStatic) != 0;
- data->reserved = 0;
return true;
}
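
With the debugger now calling the analyser without an InlineMethod result, every write to result/method is guarded by a null check, so the same analysis can be used purely as a "could this be inlined" predicate. A condensed illustration of that shape, with hypothetical types:

// Sketch only: the "result may be null" convention used above. A null result means the
// caller only wants a yes/no answer (debugger); a non-null result is filled in (compiler).
#include <cstdint>

struct FakeInlineData {
  uint16_t opcode = 0;
  uint32_t data = 0;
};

// Returns whether the method is a trivially inlinable constant return; fills in
// *result only when the caller asked for it.
static bool AnalyseConstReturn(uint32_t const_value, bool returns_object, FakeInlineData* result) {
  if (returns_object && const_value != 0) {
    return false;  // Returning a non-null reference constant is not inlinable.
  }
  if (result != nullptr) {
    result->opcode = 1;  // hypothetical kInlineOpNonWideConst tag
    result->data = const_value;
  }
  return true;
}

int main() {
  FakeInlineData data;
  bool compiler_ok = AnalyseConstReturn(0, /*returns_object=*/true, &data);    // fills data
  bool debugger_ok = AnalyseConstReturn(0, /*returns_object=*/true, nullptr);  // predicate only
  return (compiler_ok && debugger_ok) ? 0 : 1;
}
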
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index dde9a94..6ed61f6 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -26,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
+#include "nth_caller_visitor.h"
#include "object_utils.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
@@ -461,9 +462,8 @@ void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg
}
jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod,
- jobject javaReceiver, jobject javaArgs) {
- jmethodID mid = soa.Env()->FromReflectedMethod(javaMethod);
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ jobject javaReceiver, jobject javaArgs, bool accessible) {
+ mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
@@ -500,6 +500,13 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod,
return NULL;
}
+ // If the method is not set to be accessible, verify that the caller can access it.
+ if (!accessible && !VerifyAccess(receiver, declaring_class, m->GetAccessFlags())) {
+ ThrowIllegalAccessException(nullptr, StringPrintf("Cannot access method: %s",
+ PrettyMethod(m).c_str()).c_str());
+ return nullptr;
+ }
+
// Invoke the method.
JValue result;
ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
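
InvokeMethod() now takes an accessible flag so that reflective calls made through setAccessible(true) can skip the VerifyAccess() check added above. A hedged sketch of the calling side; the Method_invoke glue name and parameter list are assumptions, only InvokeMethod's new signature comes from this patch:

    static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
                                 jobject javaArgs, jboolean accessible) {
      ScopedObjectAccess soa(env);
      // Forward AccessibleObject.isAccessible() so access checks can be bypassed.
      return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, accessible != JNI_FALSE);
    }
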
@@ -544,76 +551,58 @@ bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
- const JValue& src, JValue& dst) {
- CHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ const JValue& src, JValue* dst) {
+ DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
+ if (LIKELY(srcType == dstType)) {
+ dst->SetJ(src.GetJ());
+ return true;
+ }
switch (dstType) {
- case Primitive::kPrimBoolean:
- if (srcType == Primitive::kPrimBoolean) {
- dst.SetZ(src.GetZ());
- return true;
- }
- break;
- case Primitive::kPrimChar:
- if (srcType == Primitive::kPrimChar) {
- dst.SetC(src.GetC());
- return true;
- }
- break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
case Primitive::kPrimByte:
- if (srcType == Primitive::kPrimByte) {
- dst.SetB(src.GetB());
- return true;
- }
+ // Only expect assignment with source and destination of identical type.
break;
case Primitive::kPrimShort:
- if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimShort) {
- dst.SetS(src.GetI());
+ if (srcType == Primitive::kPrimByte) {
+ dst->SetS(src.GetI());
return true;
}
break;
case Primitive::kPrimInt:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
- srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetI(src.GetI());
+ srcType == Primitive::kPrimShort) {
+ dst->SetI(src.GetI());
return true;
}
break;
case Primitive::kPrimLong:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetJ(src.GetI());
- return true;
- } else if (srcType == Primitive::kPrimLong) {
- dst.SetJ(src.GetJ());
+ dst->SetJ(src.GetI());
return true;
}
break;
case Primitive::kPrimFloat:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetF(src.GetI());
+ dst->SetF(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetF(src.GetJ());
- return true;
- } else if (srcType == Primitive::kPrimFloat) {
- dst.SetF(src.GetF());
+ dst->SetF(src.GetJ());
return true;
}
break;
case Primitive::kPrimDouble:
if (srcType == Primitive::kPrimByte || srcType == Primitive::kPrimChar ||
srcType == Primitive::kPrimShort || srcType == Primitive::kPrimInt) {
- dst.SetD(src.GetI());
+ dst->SetD(src.GetI());
return true;
} else if (srcType == Primitive::kPrimLong) {
- dst.SetD(src.GetJ());
+ dst->SetD(src.GetJ());
return true;
} else if (srcType == Primitive::kPrimFloat) {
- dst.SetD(src.GetF());
- return true;
- } else if (srcType == Primitive::kPrimDouble) {
- dst.SetJ(src.GetJ());
+ dst->SetD(src.GetF());
return true;
}
break;
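
After this rewrite ConvertPrimitiveValue() handles the identical-type case up front and otherwise accepts only widening conversions (byte/char/short to int, integral types to long/float/double, float to double). A small usage sketch against the new pointer-based out parameter; the nullptr throw location mirrors how the unboxing helpers below call it:

    JValue src;
    JValue dst;
    src.SetI(42);
    // int -> long is a widening conversion: succeeds, dst.GetJ() == 42.
    bool ok = ConvertPrimitiveValue(nullptr, false,
                                    Primitive::kPrimInt, Primitive::kPrimLong, src, &dst);
    // int -> short is narrowing: the switch falls through and the call returns false.
    bool bad = ConvertPrimitiveValue(nullptr, false,
                                     Primitive::kPrimInt, Primitive::kPrimShort, src, &dst);
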
@@ -643,7 +632,7 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) {
return nullptr;
}
- jmethodID m = NULL;
+ jmethodID m = nullptr;
const char* shorty;
switch (src_class) {
case Primitive::kPrimBoolean:
@@ -699,29 +688,25 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) {
return result.GetL();
}
-static std::string UnboxingFailureKind(mirror::ArtMethod* m, int index, mirror::ArtField* f)
+static std::string UnboxingFailureKind(mirror::ArtField* f)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (m != NULL && index != -1) {
- ++index; // Humans count from 1.
- return StringPrintf("method %s argument %d", PrettyMethod(m, false).c_str(), index);
- }
- if (f != NULL) {
+ if (f != nullptr) {
return "field " + PrettyField(f, false);
}
return "result";
}
static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, int index, mirror::ArtField* f)
+ mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- bool unbox_for_result = (f == NULL) && (index == -1);
+ bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
- if (UNLIKELY(o != NULL && !o->InstanceOf(dst_class))) {
+ if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(o).c_str()).c_str());
} else {
@@ -732,20 +717,20 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
}
return false;
}
- unboxed_value.SetL(o);
+ unboxed_value->SetL(o);
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("Can't unbox %s to void",
- UnboxingFailureKind(m, index, f).c_str()).c_str());
+ UnboxingFailureKind(f).c_str()).c_str());
return false;
}
- if (UNLIKELY(o == NULL)) {
+ if (UNLIKELY(o == nullptr)) {
if (!unbox_for_result) {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got null",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
ThrowNullPointerException(throw_location,
@@ -757,7 +742,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
JValue boxed_value;
const StringPiece src_descriptor(ClassHelper(o->GetClass()).GetDescriptor());
- mirror::Class* src_class = NULL;
+ mirror::Class* src_class = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::ArtField* primitive_field = o->GetClass()->GetIFields()->Get(0);
if (src_descriptor == "Ljava/lang/Boolean;") {
@@ -787,7 +772,7 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
} else {
ThrowIllegalArgumentException(throw_location,
StringPrintf("%s has type %s, got %s",
- UnboxingFailureKind(m, index, f).c_str(),
+ UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyDescriptor(src_descriptor.data()).c_str()).c_str());
return false;
@@ -798,21 +783,41 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
boxed_value, unboxed_value);
}
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index) {
- CHECK(m != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, m, index, NULL);
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value) {
+ DCHECK(f != nullptr);
+ return UnboxPrimitive(nullptr, o, dst_class, f, unboxed_value);
}
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f) {
- CHECK(f != NULL);
- return UnboxPrimitive(NULL, o, dst_class, unboxed_value, NULL, -1, f);
+bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
+ mirror::Class* dst_class, JValue* unboxed_value) {
+ return UnboxPrimitive(&throw_location, o, dst_class, nullptr, unboxed_value);
}
-bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value) {
- return UnboxPrimitive(&throw_location, o, dst_class, unboxed_value, NULL, -1, NULL);
+bool VerifyAccess(mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags) {
+ NthCallerVisitor visitor(Thread::Current(), 2);
+ visitor.WalkStack();
+ mirror::Class* caller_class = visitor.caller->GetDeclaringClass();
+
+ if ((((access_flags & kAccPublic) != 0) && declaring_class->IsPublic()) ||
+ caller_class == declaring_class) {
+ return true;
+ }
+ if ((access_flags & kAccPrivate) != 0) {
+ return false;
+ }
+ if ((access_flags & kAccProtected) != 0) {
+ if (obj != nullptr && !obj->InstanceOf(caller_class) &&
+ !declaring_class->IsInSamePackage(caller_class)) {
+ return false;
+ } else if (declaring_class->IsAssignableFrom(caller_class)) {
+ return true;
+ }
+ }
+ if (!declaring_class->IsInSamePackage(caller_class)) {
+ return false;
+ }
+ return true;
}
} // namespace art
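
VerifyAccess() walks two frames up (NthCallerVisitor with depth 2) to find the class performing the reflective access and then applies the usual public/private/protected/package rules. A hedged sketch of a field-side caller mirroring the method path added above; 'accessible', 'o' and 'f' are assumed to come from the surrounding Field accessor, not from this hunk:

    if (!accessible && !VerifyAccess(o, f->GetDeclaringClass(), f->GetAccessFlags())) {
      ThrowIllegalAccessException(nullptr,
          StringPrintf("Cannot access field: %s", PrettyField(f).c_str()).c_str());
      return;  // caller bails out with the pending IllegalAccessException
    }
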
diff --git a/runtime/reflection.h b/runtime/reflection.h
index d2f9f25..d9a7228 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -36,19 +36,16 @@ class ThrowLocation;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForArgument(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtMethod* m, size_t index)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, JValue& unboxed_value,
- mirror::ArtField* f)
+bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
+ JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue& unboxed_value)
+ mirror::Class* dst_class, JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
- const JValue& src, JValue& dst)
+ const JValue& src, JValue* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccess& soa, jobject obj, jmethodID mid, va_list args)
@@ -71,12 +68,15 @@ void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
jobject InvokeMethod(const ScopedObjectAccess& soa, jobject method, jobject receiver,
- jobject args)
+ jobject args, bool accessible)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+bool VerifyAccess(mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
} // namespace art
#endif // ART_RUNTIME_REFLECTION_H_
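
The header now exposes only the field and result unboxing entry points, both taking the out value as a pointer. A usage sketch of the new UnboxPrimitiveForField() shape; boxed_obj, field_type_class and f are illustrative names:

    JValue unboxed_value;
    if (!UnboxPrimitiveForField(boxed_obj, field_type_class, f, &unboxed_value)) {
      return;  // UnboxPrimitive has already raised the appropriate exception
    }
    // unboxed_value now holds the primitive, e.g. unboxed_value.GetI() for an int field.
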
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index eaa27de..a8da2f8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -80,7 +80,11 @@ namespace art {
Runtime* Runtime::instance_ = NULL;
Runtime::Runtime()
- : compiler_callbacks_(nullptr),
+ : pre_allocated_OutOfMemoryError_(nullptr),
+ resolution_method_(nullptr),
+ imt_conflict_method_(nullptr),
+ default_imt_(nullptr),
+ compiler_callbacks_(nullptr),
is_zygote_(false),
is_concurrent_gc_enabled_(true),
is_explicit_gc_disabled_(false),
@@ -94,10 +98,6 @@ Runtime::Runtime()
class_linker_(nullptr),
signal_catcher_(nullptr),
java_vm_(nullptr),
- pre_allocated_OutOfMemoryError_(nullptr),
- resolution_method_(nullptr),
- imt_conflict_method_(nullptr),
- default_imt_(nullptr),
fault_message_lock_("Fault message lock"),
fault_message_(""),
method_verifier_lock_("Method verifiers lock"),
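
The constructor hunk keeps the initializer list in the same order as the reordered member declarations in runtime.h further down; C++ always constructs members in declaration order, so keeping the two in sync avoids -Wreorder warnings and makes the actual initialization order obvious. A minimal self-contained illustration with hypothetical types unrelated to Runtime:

    struct Widget {
      int* buffer_;
      int size_;
      // buffer_ is declared first, so it is constructed first no matter how the
      // initializer list is written; listing it first keeps code and behaviour aligned.
      Widget() : buffer_(nullptr), size_(0) {}
    };
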
@@ -839,7 +839,7 @@ void Runtime::DetachCurrentThread() {
void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
// Visit the classes held as static in mirror classes, these can be visited concurrently and only
- // need to be visited once since they never change.
+ // need to be visited once per GC since they never change.
mirror::ArtField::VisitRoots(callback, arg);
mirror::ArtMethod::VisitRoots(callback, arg);
mirror::Class::VisitRoots(callback, arg);
@@ -860,6 +860,7 @@ void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
intern_table_->VisitRoots(callback, arg, flags);
class_linker_->VisitRoots(callback, arg, flags);
+ Dbg::VisitRoots(callback, arg);
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(callback, arg);
@@ -896,6 +897,7 @@ void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) {
if (preinitialization_transaction != nullptr) {
preinitialization_transaction->VisitRoots(callback, arg);
}
+ instrumentation_.VisitRoots(callback, arg);
}
void Runtime::VisitNonConcurrentRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index eeaaa2b..50c88d3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -442,6 +442,12 @@ class Runtime {
// A pointer to the active runtime or NULL.
static Runtime* instance_;
+ mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];
+ mirror::Throwable* pre_allocated_OutOfMemoryError_;
+ mirror::ArtMethod* resolution_method_;
+ mirror::ArtMethod* imt_conflict_method_;
+ mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool is_concurrent_gc_enabled_;
@@ -475,16 +481,6 @@ class Runtime {
JavaVMExt* java_vm_;
- mirror::Throwable* pre_allocated_OutOfMemoryError_;
-
- mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];
-
- mirror::ArtMethod* resolution_method_;
-
- mirror::ArtMethod* imt_conflict_method_;
-
- mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
-
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index afa5574..38e4204 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2025,6 +2025,12 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
jni_env_->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
jni_env_->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
SirtVisitRoots(visitor, arg, thread_id);
+ if (debug_invoke_req_ != nullptr) {
+ debug_invoke_req_->VisitRoots(visitor, arg, thread_id, kRootDebugger);
+ }
+ if (single_step_control_ != nullptr) {
+ single_step_control_->VisitRoots(visitor, arg, thread_id, kRootDebugger);
+ }
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
@@ -2074,6 +2080,16 @@ void Thread::SetTlab(byte* start, byte* end) {
thread_local_objects_ = 0;
}
+bool Thread::HasTlab() const {
+ bool has_tlab = thread_local_pos_ != nullptr;
+ if (has_tlab) {
+ DCHECK(thread_local_start_ != nullptr && thread_local_end_ != nullptr);
+ } else {
+ DCHECK(thread_local_start_ == nullptr && thread_local_end_ == nullptr);
+ }
+ return has_tlab;
+}
+
std::ostream& operator<<(std::ostream& os, const Thread& thread) {
thread.ShortDump(os);
return os;
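
Thread::HasTlab() gives callers a cheap way to ask whether a thread currently owns a thread-local allocation buffer, with DCHECKs that the start/end pointers agree with that answer. A hedged sketch of the kind of caller this enables; RevokeThreadLocalBuffer() is a hypothetical helper, not part of this patch:

    void MaybeRevokeTlab(Thread* thread) {
      if (!thread->HasTlab()) {
        return;  // the thread never allocated a TLAB, nothing to hand back
      }
      RevokeThreadLocalBuffer(thread);  // hypothetical: return the buffer to its space
    }
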
diff --git a/runtime/thread.h b/runtime/thread.h
index 6cbd3d9..32875e6 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -95,9 +95,13 @@ enum ThreadFlag {
class PACKED(4) Thread {
public:
// Space to throw a StackOverflowError in.
-#if __LP64__
// TODO: shrink reserved space, in particular for 64bit.
+#if defined(__x86_64__)
static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
+#elif defined(__aarch64__)
+ // Worst case, we would need about 2.6x the x86_64 amount to cover the many more registers,
+ // but this value works well in practice.
+ static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#else
static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif
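
The reserved stack region is now chosen per architecture: 24 KB on x86_64, 32 KB on arm64 and 16 KB elsewhere. Since it is a static constexpr member it can be sanity-checked at build time; a sketch only, and the 4 KB page-size assumption is mine rather than the patch's:

    // Assumes 4 KB pages and ART's KB constant from globals.h.
    static_assert(Thread::kStackOverflowReservedBytes % (4 * KB) == 0,
                  "stack overflow reserved bytes should be page aligned");
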
@@ -852,6 +856,7 @@ class PACKED(4) Thread {
// Doesn't check that there is room.
mirror::Object* AllocTlab(size_t bytes);
void SetTlab(byte* start, byte* end);
+ bool HasTlab() const;
// Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
// equal to a valid pointer.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index fb2d29f..21e3e44 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3377,7 +3377,7 @@ void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegTyp
}
void MethodVerifier::VerifyAPut(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
+ const RegType& insn_type, bool is_primitive) {
const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
@@ -3533,6 +3533,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
+ DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
if (field_type->Equals(insn_type) ||
@@ -3546,14 +3547,14 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
// compile time
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
<< " to be of type '" << insn_type
- << "' but found type '" << field_type << "' in get";
+ << "' but found type '" << *field_type << "' in get";
return;
}
} else {
if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
- << "' but found type '" << field_type
+ << "' but found type '" << *field_type
<< "' in get-object";
work_line_->SetRegisterType(vregA, reg_types_.Conflict());
return;
@@ -3599,6 +3600,7 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
}
+ DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
if (is_primitive) {
VerifyPrimitivePut(*field_type, insn_type, vregA);
@@ -3606,7 +3608,7 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
- << "' but found type '" << field_type
+ << "' but found type '" << *field_type
<< "' in put-object";
return;
}
@@ -3692,6 +3694,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
fh.GetTypeDescriptor(), false);
}
+ DCHECK(field_type != nullptr);
const uint32_t vregA = inst->VRegA_22c();
if (is_primitive) {
if (field_type->Equals(insn_type) ||
@@ -3705,14 +3708,14 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
// compile time
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
<< " to be of type '" << insn_type
- << "' but found type '" << field_type << "' in get";
+ << "' but found type '" << *field_type << "' in get";
return;
}
} else {
if (!insn_type.IsAssignableFrom(*field_type)) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
- << "' but found type '" << field_type
+ << "' but found type '" << *field_type
<< "' in get-object";
work_line_->SetRegisterType(vregA, reg_types_.Conflict());
return;
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 1695fc5..1935a5b 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -471,7 +471,7 @@ TEST_F(RegTypeReferenceTest, Merging) {
EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
}
-TEST_F(RegTypeReferenceTest, MergingFloat) {
+TEST_F(RegTypeTest, MergingFloat) {
// Testing merging logic with float and float constants.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
@@ -502,7 +502,7 @@ TEST_F(RegTypeReferenceTest, MergingFloat) {
}
}
-TEST_F(RegTypeReferenceTest, MergingLong) {
+TEST_F(RegTypeTest, MergingLong) {
// Testing merging logic with long and long constants.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
@@ -556,7 +556,7 @@ TEST_F(RegTypeReferenceTest, MergingLong) {
}
}
-TEST_F(RegTypeReferenceTest, MergingDouble) {
+TEST_F(RegTypeTest, MergingDouble) {
// Testing merging logic with double and double constants.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index e211c83..39df375 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -36,9 +36,8 @@ inline void VerifyObject(mirror::Object* obj) {
bool failed = !IsAligned<kObjectAlignment>(obj);
if (!failed) {
mirror::Class* c = obj->GetClass<kVerifyNone>();
- failed = failed || c == nullptr;
- failed = failed ||!IsAligned<kObjectAlignment>(c);
- failed = failed ||!VerifyClassClass(c);
+ failed = failed || !IsAligned<kObjectAlignment>(c);
+ failed = failed || !VerifyClassClass(c);
}
if (UNLIKELY(failed)) {
Runtime::Current()->GetHeap()->VerifyObjectBody(obj);