summaryrefslogtreecommitdiffstats
path: root/runtime/gc/collector/concurrent_copying.h
blob: 60ea6b644421ac2223e9a4bbe776ee655a3d85a3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_region.h"
#include "jni.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/accounting/space_bitmap.h"
#include "mirror/object.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class RootInfo;

namespace gc {

namespace accounting {
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

// Concurrent queue. Used as the mark stack. TODO: use a concurrent
// stack for locality.
//
// A fixed-capacity circular buffer over monotonically increasing logical
// indices: producers advance tail_, the single consumer advances head_,
// and live entries occupy [head_, tail_). The capacity must be a power of
// two so GetSlotAddr can wrap indices with a mask instead of a modulo.
class MarkQueue {
 public:
  // 'size' is the fixed capacity; must be a power of two.
  explicit MarkQueue(size_t size) : size_(size) {
    CHECK(IsPowerOfTwo(size_));
    buf_.reset(new Atomic<mirror::Object*>[size_]);
    CHECK(buf_.get() != nullptr);
    Clear();
  }

  // Maps a logical index onto its slot in the circular buffer.
  ALWAYS_INLINE Atomic<mirror::Object*>* GetSlotAddr(size_t index) {
    return &(buf_.get()[index & (size_ - 1)]);
  }

  // Multiple-producer enqueue. Returns false (without enqueuing) if the
  // queue is full.
  bool Enqueue(mirror::Object* to_ref) {
    size_t t;
    do {
      t = tail_.LoadRelaxed();
      size_t h = head_.LoadSequentiallyConsistent();
      if (t == h + size_) {
        // It's full: tail_ leads head_ by the whole capacity. Note the
        // operands must be this way around; 'tail_ + size_ == head_'
        // can never hold since tail_ never trails head_, and would let a
        // producer wrap around and clobber the unconsumed slot at head_.
        return false;
      }
    } while (!tail_.CompareExchangeWeakSequentiallyConsistent(t, t + 1));
    // We got a slot but its content has not been filled yet at this point.
    GetSlotAddr(t)->StoreSequentiallyConsistent(to_ref);
    return true;
  }

  // Thread-unsafe enqueue; the caller must guarantee exclusive access.
  bool EnqueueThreadUnsafe(mirror::Object* to_ref) {
    size_t t = tail_.LoadRelaxed();
    size_t h = head_.LoadRelaxed();
    if (t == h + size_) {
      // It's full (same condition as in Enqueue above).
      return false;
    }
    GetSlotAddr(t)->StoreRelaxed(to_ref);
    tail_.StoreRelaxed(t + 1);
    return true;
  }

  // Single-consumer dequeue. Returns null if the queue is empty.
  mirror::Object* Dequeue() {
    size_t h = head_.LoadRelaxed();
    size_t t = tail_.LoadSequentiallyConsistent();
    if (h == t) {
      // It's empty.
      return nullptr;
    }
    Atomic<mirror::Object*>* slot = GetSlotAddr(h);
    mirror::Object* ref = slot->LoadSequentiallyConsistent();
    while (ref == nullptr) {
      // A producer has claimed this slot (bumped tail_) but has not yet
      // published its content; spin until the store becomes visible.
      ref = slot->LoadSequentiallyConsistent();
    }
    slot->StoreRelaxed(nullptr);
    head_.StoreSequentiallyConsistent(h + 1);
    return ref;
  }

  bool IsEmpty() {
    size_t h = head_.LoadSequentiallyConsistent();
    size_t t = tail_.LoadSequentiallyConsistent();
    return h == t;
  }

  // Resets the queue to empty and nulls out every slot; Dequeue relies on
  // unpublished slots reading as null.
  void Clear() {
    head_.StoreRelaxed(0);
    tail_.StoreRelaxed(0);
    memset(buf_.get(), 0, size_ * sizeof(Atomic<mirror::Object*>));
  }

 private:
  Atomic<size_t> head_;  // Logical index of the next slot to dequeue from.
  Atomic<size_t> tail_;  // Logical index of the next slot to enqueue into.

  size_t size_;  // Capacity; always a power of two.
  std::unique_ptr<Atomic<mirror::Object*>[]> buf_;  // The circular buffer.
};

// The concurrent copying collector. Runs against a RegionSpace (see
// SetRegionSpace) and reports itself as kCollectorTypeCC; mutators are
// expected to forward from-space references through Mark().
class ConcurrentCopying : public GarbageCollector {
 public:
  // TODO: disable these flags for production use.
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = true;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = true;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = true;

  ConcurrentCopying(Heap* heap, const std::string& name_prefix = "");
  ~ConcurrentCopying();

  // Top-level driver that runs the phases declared below.
  virtual void RunPhases() OVERRIDE;
  void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void FinishPhase();

  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  // Installs the region space the collector operates on; must be non-null.
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Checks that 'ref' (held by 'obj' at 'offset') satisfies the to-space
  // invariant.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // A reference is in to-space iff marking it returns it unchanged.
  bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Marks 'from_ref' and returns the reference it forwards to.
  mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }

 private:
  mirror::Object* PopOffMarkStack();
  template<bool kThreadSafe>
  void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Copies 'from_ref' to to-space and returns the new reference.
  mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Visits the reference fields of 'to_ref'.
  void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Used for kEnableNoFromSpaceRefsVerification; requires the exclusive
  // mutator lock (i.e. a pause).
  void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  bool ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self, bool concurrent)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Returns the forwarded reference if 'from_ref' is marked (compare with
  // IsInToSpace above).
  mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // C-style callbacks ('arg' is the ConcurrentCopying instance) used by
  // external mechanisms such as reference processing and weak sweeping.
  static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static bool IsHeapReferenceMarkedCallback(
      mirror::HeapReference<mirror::Object>* field, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static void ProcessMarkStackCallback(void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void ClearBlackPtrs()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckEmptyMarkQueue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Flips thread roots at the pause; takes the mutator lock exclusively
  // internally, hence LOCKS_EXCLUDED.
  void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_);
  void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;   // Exposed via GetBarrier().
  MarkQueue mark_queue_;                  // The mark stack (see MarkQueue).
  bool is_marking_;                       // True while marking is ongoing.
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneRegion immune_region_;            // Address range not collected this cycle.
  std::unique_ptr<accounting::HeapBitmap> cc_heap_bitmap_;
  std::vector<accounting::SpaceBitmap<kObjectAlignment>*> cc_bitmaps_;
  accounting::SpaceBitmap<kObjectAlignment>* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  // Recorded by RecordLiveStackFreezeSize at the pause.
  size_t live_stack_freeze_size_;
  // From-space accounting snapshots (used when kEnableFromSpaceAccountingCheck).
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_queue_push_disallowed_;

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that were unused due to lost races (cas failures) at
  // object copy/forward pointer install. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.

  friend class ConcurrentCopyingRefFieldsVisitor;
  friend class ConcurrentCopyingImmuneSpaceObjVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
  friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
  friend class ConcurrentCopyingClearBlackPtrsVisitor;
  friend class ConcurrentCopyingLostCopyVisitor;
  friend class ThreadFlipVisitor;
  friend class FlipCallback;
  friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_