path: root/runtime/gc/collector/semi_space.h
author    Mathieu Chartier <mathieuc@google.com>  2013-09-13 13:46:47 -0700
committer Mathieu Chartier <mathieuc@google.com>  2013-11-11 15:34:27 -0800
commit    590fee9e8972f872301c2d16a575d579ee564bee (patch)
tree      b02db45c72f1911ec896b93379ada0276aea3199 /runtime/gc/collector/semi_space.h
parent    5b70680b8df6d8fa95bb8e1070d0107f3d388940 (diff)
Compacting collector.
The compacting collector is currently similar to semispace. It works by copying objects back and forth between two bump pointer spaces. Some types of objects are "non-movable" due to current runtime limitations: Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space which have no lock in the allocation code path; allocation uses atomic operations to bump an index. Traversing the objects in a bump pointer space relies on Object::SizeOf matching the allocated size exactly.

Runtime changes:
JNI::GetArrayElements now returns a copy if you attempt to get the backing data of a movable array. For GetArrayElementsCritical, we return direct backing storage for any type of array, but temporarily disable the GC until the critical region is completed.

Added a new runtime call, VisitObjects, which is used in place of the old pattern of flushing the allocation stack and walking the bitmaps.

Changed the image writer to be compaction safe and to use the object monitor word for forwarding addresses.

Added a number of SIRTs to ClassLinker, MethodLinker, etc.

TODO: Enable switching allocators, compacting in the background, etc.

Bug: 8981901
Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
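To make the lock-free allocation path described above concrete, here is a minimal, self-contained C++ sketch. It is not ART's BumpPointerSpace implementation; the class and member names are illustrative, and alignment and thread-local buffering are ignored. It demonstrates the core idea: each allocation reserves space by atomically advancing a position pointer, so no lock is taken on the allocation path.

    // Illustrative sketch only -- not the ART implementation.
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    class BumpPointerRegion {
     public:
      BumpPointerRegion(uint8_t* begin, size_t capacity)
          : begin_(begin), end_(begin + capacity), pos_(begin) {}

      // Reserve num_bytes by atomically advancing the allocation position.
      // Returns nullptr when the region is exhausted; no lock is ever taken.
      void* Alloc(size_t num_bytes) {
        uint8_t* old_pos = pos_.load(std::memory_order_relaxed);
        uint8_t* new_pos;
        do {
          new_pos = old_pos + num_bytes;
          if (new_pos > end_) {
            return nullptr;  // Out of space in this region.
          }
          // compare_exchange_weak reloads old_pos on failure, so the loop
          // retries with the latest position if another thread allocated
          // concurrently.
        } while (!pos_.compare_exchange_weak(old_pos, new_pos,
                                             std::memory_order_relaxed));
        return old_pos;
      }

     private:
      uint8_t* const begin_;
      uint8_t* const end_;
      std::atomic<uint8_t*> pos_;  // Next free byte; only ever moves forward.
    };

Walking the allocated objects afterwards then only requires that each object's size can be recomputed exactly, which is why the commit message notes that traversal relies on Object::SizeOf matching the allocated size.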
Diffstat (limited to 'runtime/gc/collector/semi_space.h')
-rw-r--r--  runtime/gc/collector/semi_space.h | 289
1 file changed, 289 insertions, 0 deletions
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
new file mode 100644
index 0000000..13d5195
--- /dev/null
+++ b/runtime/gc/collector/semi_space.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+
+#include "atomic_integer.h"
+#include "barrier.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+namespace mirror {
+ class Class;
+ class Object;
+ template<class T> class ObjectArray;
+} // namespace mirror
+
+class StackVisitor;
+class Thread;
+
+namespace gc {
+
+namespace accounting {
+ template <typename T> class AtomicStack;
+ class MarkIfReachesAllocspaceVisitor;
+ class ModUnionClearCardVisitor;
+ class ModUnionVisitor;
+ class ModUnionTableBitmap;
+ class MarkStackChunk;
+ typedef AtomicStack<mirror::Object*> ObjectStack;
+ class SpaceBitmap;
+} // namespace accounting
+
+namespace space {
+ class BumpPointerSpace;
+ class ContinuousMemMapAllocSpace;
+ class ContinuousSpace;
+} // namespace space
+
+class Heap;
+
+namespace collector {
+
+class SemiSpace : public GarbageCollector {
+ public:
+ explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+
+ ~SemiSpace() {}
+
+ virtual void InitializePhase();
+ virtual bool IsConcurrent() const {
+ return false;
+ }
+ virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void MarkReachableObjects()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ virtual GcType GetGcType() const {
+ return kGcTypePartial;
+ }
+
+ // Sets which space we will be copying objects to.
+ void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
+
+ // Sets the space where we copy objects from.
+ void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);
+
+ // Initializes internal structures.
+ void Init();
+
+ // Find the default mark bitmap.
+ void FindDefaultMarkBitmap();
+
+ // Returns the new address of the object.
+ mirror::Object* MarkObject(mirror::Object* object)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ void ScanObject(mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Marks the root set at the start of a garbage collection.
+ void MarkRoots()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Make a space immune. Immune spaces have all live objects marked, that is, the mark and
+ // live bitmaps are bound together.
+ void ImmuneSpace(space::ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+ // the image. Mark that portion of the heap as immune.
+ virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void BindLiveToMarkBitmap(space::ContinuousSpace* space)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void UnBindBitmaps()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ void ProcessReferences(Thread* self)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Sweeps unmarked objects to complete the garbage collection.
+ virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Sweeps unmarked objects in the large object space to complete the garbage collection.
+ void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Sweep only pointers within an array. WARNING: Trashes objects.
+ void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ mirror::Object* GetClearedReferences() {
+ return cleared_reference_list_;
+ }
+
+ // TODO: enable thread safety analysis when in use by multiple worker threads.
+ template <typename MarkVisitor>
+ void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ void SweepSystemWeaks()
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+ // Returns null if the object is not marked, otherwise returns the forwarding address (same as
+ // the object itself for non-movable objects).
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
+
+ static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Marks a large object.
+ bool MarkLargeObject(const mirror::Object* obj)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Special sweep for zygote that just marks objects / dirties cards.
+ static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+ // Resize the mark stack to new_size.
+ void ResizeMarkStack(size_t new_size);
+
+ // Returns how many threads we should use for the current GC phase based on whether we are
+ // paused, i.e. whether or not we care about pauses.
+ size_t GetThreadCount(bool paused) const;
+
+ // Returns true if an object is inside of the immune region (assumed to be marked).
+ bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
+ return obj >= immune_begin_ && obj < immune_end_;
+ }
+
+ bool IsImmuneSpace(const space::ContinuousSpace* space) const;
+
+ static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
+ const StackVisitor* visitor);
+
+ void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ template <typename Visitor>
+ static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visit the header, static field references, and interface pointers of a class object.
+ template <typename Visitor>
+ static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ template <typename Visitor>
+ static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visit all of the references in an object array.
+ template <typename Visitor>
+ static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Visits the header and field references of a data object.
+ template <typename Visitor>
+ static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+ const Visitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ return VisitInstanceFieldsReferences(klass, obj, visitor);
+ }
+
+ // Push an object onto the mark stack.
+ inline void MarkStackPush(mirror::Object* obj);
+
+ void UpdateAndMarkModUnion()
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Schedules an unmarked object for reference processing.
+ void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ // Recursively blackens objects on the mark stack.
+ void ProcessMarkStack(bool paused)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void EnqueueFinalizerReferences(mirror::Object** ref)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void PreserveSomeSoftReferences(mirror::Object** ref)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void ClearWhiteReferences(mirror::Object** list)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+ mirror::Object** weak_references,
+ mirror::Object** finalizer_references,
+ mirror::Object** phantom_references)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+
+ mirror::Object* GetForwardingAddress(mirror::Object* obj);
+
+ // Stack of objects that still need to be scanned (the mark stack).
+ accounting::ObjectStack* mark_stack_;
+
+ // Immune range, every object inside the immune range is assumed to be marked.
+ mirror::Object* immune_begin_;
+ mirror::Object* immune_end_;
+
+ // Destination and source spaces.
+ space::ContinuousMemMapAllocSpace* to_space_;
+ space::ContinuousMemMapAllocSpace* from_space_;
+
+ mirror::Object* soft_reference_list_;
+ mirror::Object* weak_reference_list_;
+ mirror::Object* finalizer_reference_list_;
+ mirror::Object* phantom_reference_list_;
+ mirror::Object* cleared_reference_list_;
+
+ Thread* self_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
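As a footnote on the forwarding addresses used by GetForwardingAddress / GetForwardingAddressInFromSpace above, and on the commit message's note that the image writer reuses the object monitor word for forwarding addresses, the sketch below shows the general technique in self-contained C++. It is not ART's lock word encoding; the tag bit, the names, and the alignment assumption (objects aligned to at least 2 bytes, so the low pointer bit is free) are illustrative assumptions only.

    // Illustrative sketch of stashing a forwarding pointer in an object's
    // monitor/lock word -- not ART's actual lock word format.
    #include <atomic>
    #include <cstdint>

    struct ToyObject {
      std::atomic<uintptr_t> lock_word{0};  // Normally holds monitor/hash state.
    };

    // Assumed tag: objects are at least 2-byte aligned, so the low bit of a
    // pointer is free to mark "this word now holds a forwarding address".
    constexpr uintptr_t kForwardingTag = 0x1;

    // Record the object's new location by overwriting its lock word.
    inline void SetForwardingAddress(ToyObject* obj, ToyObject* new_location) {
      obj->lock_word.store(
          reinterpret_cast<uintptr_t>(new_location) | kForwardingTag,
          std::memory_order_relaxed);
    }

    // Return the new location if the object has been forwarded, null otherwise.
    inline ToyObject* GetForwardingAddress(ToyObject* obj) {
      const uintptr_t word = obj->lock_word.load(std::memory_order_relaxed);
      if ((word & kForwardingTag) == 0) {
        return nullptr;  // Not copied yet.
      }
      return reinterpret_cast<ToyObject*>(word & ~kForwardingTag);
    }

In general, whatever the word held before (monitor or hash state) has to be preserved elsewhere while the forwarding address occupies it; this sketch ignores that bookkeeping.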