author     Mathieu Chartier <mathieuc@google.com>    2015-04-03 11:21:55 -0700
committer  Mathieu Chartier <mathieuc@google.com>    2015-04-06 10:44:37 -0700
commit     bb87e0f1a52de656bc77cb01cb887e51a0e5198b (patch)
tree       113f014c6e20fab3e936a3ac05f9f738639541f6 /runtime/read_barrier-inl.h
parent     e57fc0f0260fcb1d08cbb720ec95c04c0f394b91 (diff)
Refactor and improve GC root handling
Changed GcRoot to use compressed references. Changed root visiting to use virtual functions instead of function pointers. Changed the root visiting interface to take an array of roots instead of a single root at a time. Added a buffered root marking helper to avoid dispatch overhead.

Root marking seems a bit faster on EvaluateAndApplyChanges due to batch marking. Pause times are unaffected. Mips64 is untested but might work.

Before:
MarkConcurrentRoots: Sum: 67.678ms 99% C.I. 2us-664.999us Avg: 161.138us Max: 671us

After:
MarkConcurrentRoots: Sum: 54.806ms 99% C.I. 2us-499.986us Avg: 136.333us Max: 602us

Bug: 19264997
Change-Id: I0a71ebb5928f205b9b3f7945b25db6489d5657ca
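To picture the batching the message describes, here is a hedged C++ sketch of an array-of-roots visitor interface plus a buffering helper. The names (RootVisitor, BufferedRootVisitor, VisitRoots, kBufferSize) and all details are simplified stand-ins for illustration, not ART's actual declarations:

#include <cstddef>

namespace sketch {

class Object;  // Stand-in for mirror::Object.

// Roots are visited through a virtual interface that accepts a batch of
// root slots, instead of a function pointer called once per root.
class RootVisitor {
 public:
  virtual ~RootVisitor() {}
  virtual void VisitRoots(Object*** roots, size_t count) = 0;
};

// Buffers individual roots and flushes them as one array, so the virtual
// call (and any per-call marking overhead) is paid once per batch.
class BufferedRootVisitor {
 public:
  explicit BufferedRootVisitor(RootVisitor* visitor)
      : visitor_(visitor), pos_(0u) {}
  ~BufferedRootVisitor() { Flush(); }

  void VisitRoot(Object** root) {
    if (pos_ >= kBufferSize) {
      Flush();
    }
    buffer_[pos_++] = root;
  }

  void Flush() {
    if (pos_ != 0u) {
      visitor_->VisitRoots(buffer_, pos_);
      pos_ = 0u;
    }
  }

 private:
  static constexpr size_t kBufferSize = 64;  // Illustrative batch size.
  RootVisitor* const visitor_;
  Object** buffer_[kBufferSize];
  size_t pos_;
};

}  // namespace sketch

The point of the buffer is that virtual dispatch happens once per batch rather than once per root, which is consistent with the speedup from batch marking reported above.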
Diffstat (limited to 'runtime/read_barrier-inl.h')
-rw-r--r--  runtime/read_barrier-inl.h | 42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index c74fded..5631ff4 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -111,6 +111,48 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
  }
}
+// TODO: Reduce copy paste
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
+inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
+  MirrorType* ref = root->AsMirrorPtr();
+  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+  if (with_read_barrier && kUseBakerReadBarrier) {
+    if (kMaybeDuringStartup && IsDuringStartup()) {
+      // During startup, the heap may not be initialized yet. Just
+      // return the given ref.
+      return ref;
+    }
+    // TODO: separate the read barrier code from the collector code more.
+    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
+      ref = reinterpret_cast<MirrorType*>(Mark(ref));
+    }
+    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    return ref;
+  } else if (with_read_barrier && kUseBrooksReadBarrier) {
+    // To be implemented.
+    return ref;
+  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
+    if (kMaybeDuringStartup && IsDuringStartup()) {
+      // During startup, the heap may not be initialized yet. Just
+      // return the given ref.
+      return ref;
+    }
+    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
+      ref = reinterpret_cast<MirrorType*>(Mark(ref));
+      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
+      // Update the field atomically. This may fail if mutator updates before us, but it's ok.
+      auto* atomic_root =
+          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
+      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+    }
+    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
+    return ref;
+  } else {
+    return ref;
+  }
+}
+
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
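To make the compare-exchange in the table-lookup branch above concrete, here is a hedged sketch of the underlying idea: a GC root holds a 32-bit compressed reference, and the collector swaps a from-space reference for its to-space copy with a single CAS. If a mutator stores a newer value first, the CAS fails, which is safe because the mutator's value is already up to date. CompressedRef, AtomicRoot, and UpdateRoot below are illustrative stand-ins, not ART's mirror::CompressedReference or Atomic<> types; the sketch assumes object addresses fit in 32 bits, as ART's compressed heap references do.

#include <atomic>
#include <cstdint>

struct Object;  // Stand-in for mirror::Object.

// Stand-in for mirror::CompressedReference: a 32-bit slot encoding an
// object pointer (assumes the heap is addressable within 32 bits).
struct CompressedRef {
  uint32_t value;

  static CompressedRef FromMirrorPtr(Object* ptr) {
    return CompressedRef{static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr))};
  }
  Object* AsMirrorPtr() const {
    return reinterpret_cast<Object*>(static_cast<uintptr_t>(value));
  }
};

// A root slot that may be updated concurrently by mutators and the GC.
using AtomicRoot = std::atomic<uint32_t>;

// Try once to replace the stale (from-space) reference with the to-space
// one. A failed CAS means a mutator got there first with an already
// current value, so the failure is deliberately ignored.
inline void UpdateRoot(AtomicRoot* root, Object* from_space, Object* to_space) {
  uint32_t expected = CompressedRef::FromMirrorPtr(from_space).value;
  uint32_t desired = CompressedRef::FromMirrorPtr(to_space).value;
  root->compare_exchange_strong(expected, desired, std::memory_order_seq_cst);
}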