author     Mathieu Chartier <mathieuc@google.com>  2013-08-14 16:14:24 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2013-08-16 13:15:37 -0700
commit     02e25119b15a6f619f17db99f5d05124a5807ff3 (patch)
tree       7be4cbbf28033e5ee0621565b410fe5d8170a8fb /runtime
parent     7d70a7932f0ba09eb01a93caab060aef1403d4e6 (diff)
Fix up TODO: C++0x, update cpplint.
Needed to update cpplint to handle const auto. Fixed a few cpplint errors that were being missed before. Replaced most of the TODO: C++0x comments with range-based loops. Loops which do not have a descriptive container name use a concrete element type instead of auto.

Change-Id: Id7cc0f27030f56057c544e94277300b3f298c9c5
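To illustrate the pattern this change applies throughout the runtime, here is a minimal, self-contained sketch; the type names, visitor signature, and helper functions below are simplified stand-ins for the ART classes, not the actual declarations from the patch:

#include <vector>

struct DexCache {};                        // hypothetical stand-in for mirror::DexCache
struct Space {};                           // hypothetical stand-in for space::ContinuousSpace
typedef void RootVisitor(DexCache* root, void* arg);

void VisitDexCaches(const std::vector<DexCache*>& dex_caches,
                    RootVisitor* visitor, void* arg) {
  // Before this change: an index loop, or an iterator typedef tagged
  // "TODO: C++0x auto":
  //   for (size_t i = 0; i < dex_caches.size(); i++) {
  //     visitor(dex_caches[i], arg);
  //   }
  // After: a range-based loop. The element name is descriptive here, so the
  // concrete element type is spelled out rather than auto.
  for (DexCache* dex_cache : dex_caches) {
    visitor(dex_cache, arg);
  }
}

void ClearMarkBitmaps(const std::vector<Space*>& spaces) {
  // Loops over self-describing containers use const auto&; accepting
  // "const auto" is what the cpplint update was needed for.
  for (const auto& space : spaces) {
    (void)space;  // ... operate on the space ...
  }
}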
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/base/histogram.h | 2
-rw-r--r--  runtime/class_linker.cc | 49
-rw-r--r--  runtime/debugger.cc | 13
-rw-r--r--  runtime/dex_file_verifier.cc | 3
-rw-r--r--  runtime/gc/accounting/gc_allocator.cc | 6
-rw-r--r--  runtime/gc/accounting/gc_allocator.h | 6
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h | 16
-rw-r--r--  runtime/gc/accounting/heap_bitmap.cc | 46
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h | 16
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 149
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 13
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 95
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 1
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.cc | 6
-rw-r--r--  runtime/gc/collector/partial_mark_sweep.h | 1
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc | 6
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.h | 1
-rw-r--r--  runtime/gc/heap.cc | 81
-rw-r--r--  runtime/indirect_reference_table.cc | 5
-rw-r--r--  runtime/indirect_reference_table.h | 10
-rw-r--r--  runtime/instrumentation.cc | 28
-rw-r--r--  runtime/intern_table.cc | 15
-rw-r--r--  runtime/intern_table_test.cc | 3
-rw-r--r--  runtime/jni_internal.cc | 12
-rw-r--r--  runtime/mirror/art_method.h | 1
-rw-r--r--  runtime/monitor.cc | 4
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 10
-rw-r--r--  runtime/output_stream_test.cc | 2
-rw-r--r--  runtime/reference_table.cc | 5
-rw-r--r--  runtime/thread_list.cc | 57
-rw-r--r--  runtime/thread_list.h | 2
-rw-r--r--  runtime/thread_pool.h | 3
-rw-r--r--  runtime/trace.cc | 7
-rw-r--r--  runtime/verifier/method_verifier.h | 2
-rw-r--r--  runtime/verifier/reg_type.cc | 8
-rw-r--r--  runtime/verifier/reg_type.h | 1
-rw-r--r--  runtime/verifier/register_line.cc | 5
37 files changed, 224 insertions, 466 deletions
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index f508af9..2a02cf4 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -112,6 +112,6 @@ template <class Value> class Histogram {
DISALLOW_COPY_AND_ASSIGN(Histogram);
};
-}
+} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 039e7bc..ef27321 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1111,16 +1111,15 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty)
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
- for (size_t i = 0; i < dex_caches_.size(); i++) {
- visitor(dex_caches_[i], arg);
+ for (mirror::DexCache* dex_cache : dex_caches_) {
+ visitor(dex_cache, arg);
}
}
{
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- visitor(it->second, arg);
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ visitor(it.second, arg);
}
// We deliberately ignore the class roots in the image since we
@@ -1135,14 +1134,13 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty)
void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- if (!visitor(it->second, arg)) {
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ if (!visitor(it.second, arg)) {
return;
}
}
- for (It it = image_classes_.begin(), end = image_classes_.end(); it != end; ++it) {
- if (!visitor(it->second, arg)) {
+ for (const std::pair<size_t, mirror::Class*>& it : image_classes_) {
+ if (!visitor(it.second, arg)) {
return;
}
}
@@ -1157,9 +1155,8 @@ static bool GetClassesVisitor(mirror::Class* c, void* arg) {
void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const {
std::set<mirror::Class*> classes;
VisitClasses(GetClassesVisitor, &classes);
- typedef std::set<mirror::Class*>::const_iterator It; // TODO: C++0x auto
- for (It it = classes.begin(), end = classes.end(); it != end; ++it) {
- if (!visitor(*it, arg)) {
+ for (mirror::Class* klass : classes) {
+ if (!visitor(klass, arg)) {
return;
}
}
@@ -2160,10 +2157,9 @@ mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::C
bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
size_t hash = Hash(descriptor);
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh;
- for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
+ for (auto it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
@@ -2172,7 +2168,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader*
return true;
}
}
- for (It it = image_classes_.lower_bound(hash), end = classes_.end();
+ for (auto it = image_classes_.lower_bound(hash), end = classes_.end();
it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
@@ -2204,8 +2200,9 @@ mirror::Class* ClassLinker::LookupClassLocked(const char* descriptor,
const mirror::ClassLoader* class_loader,
size_t hash, const Table& classes) {
ClassHelper kh(NULL, this);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ auto end = classes_.end();
+ for (auto it = classes.lower_bound(hash); it != end && it->first == hash;
+ ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0 && klass->GetClassLoader() == class_loader) {
@@ -2228,17 +2225,18 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
classes.clear();
size_t hash = Hash(descriptor);
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
// TODO: determine if its better to search classes_ or image_classes_ first
ClassHelper kh(NULL, this);
- for (It it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ for (auto it = classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash;
+ ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
classes.push_back(klass);
}
}
- for (It it = image_classes_.lower_bound(hash), end = classes_.end(); it != end && it->first == hash; ++it) {
+ for (auto it = image_classes_.lower_bound(hash), end = classes_.end();
+ it != end && it->first == hash; ++it) {
mirror::Class* klass = it->second;
kh.ChangeClass(klass);
if (strcmp(descriptor, kh.GetDescriptor()) == 0) {
@@ -3967,12 +3965,11 @@ void ClassLinker::DumpAllClasses(int flags) const {
std::vector<mirror::Class*> all_classes;
{
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = classes_.begin(), end = classes_.end(); it != end; ++it) {
- all_classes.push_back(it->second);
+ for (const std::pair<size_t, mirror::Class*>& it : classes_) {
+ all_classes.push_back(it.second);
}
- for (It it = image_classes_.begin(), end = image_classes_.end(); it != end; ++it) {
- all_classes.push_back(it->second);
+ for (const std::pair<size_t, mirror::Class*>& it : image_classes_) {
+ all_classes.push_back(it.second);
}
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 569a370..a72ae22 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3033,9 +3033,8 @@ void Dbg::DdmSetThreadNotification(bool enable) {
}
{
ScopedObjectAccess soa(self);
- typedef std::list<Thread*>::const_iterator It; // TODO: C++0x auto
- for (It it = threads.begin(), end = threads.end(); it != end; ++it) {
- Dbg::DdmSendThreadNotification(*it, CHUNK_TYPE("THCR"));
+ for (Thread* thread : threads) {
+ Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
}
}
ResumeVM();
@@ -3600,8 +3599,7 @@ class StringTable {
}
size_t IndexOf(const char* s) const {
- typedef std::set<std::string>::const_iterator It; // TODO: C++0x auto
- It it = table_.find(s);
+ auto it = table_.find(s);
if (it == table_.end()) {
LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
}
@@ -3613,9 +3611,8 @@ class StringTable {
}
void WriteTo(std::vector<uint8_t>& bytes) const {
- typedef std::set<std::string>::const_iterator It; // TODO: C++0x auto
- for (It it = table_.begin(); it != table_.end(); ++it) {
- const char* s = (*it).c_str();
+ for (const std::string& str : table_) {
+ const char* s = str.c_str();
size_t s_len = CountModifiedUtf8Chars(s);
UniquePtr<uint16_t> s_utf16(new uint16_t[s_len]);
ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 09e929c..5b076e0 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1299,8 +1299,7 @@ bool DexFileVerifier::CheckIntraSection() {
}
bool DexFileVerifier::CheckOffsetToTypeMap(uint32_t offset, uint16_t type) {
- typedef SafeMap<uint32_t, uint16_t>::iterator It; // TODO: C++0x auto
- It it = offset_to_type_map_.find(offset);
+ auto it = offset_to_type_map_.find(offset);
if (it == offset_to_type_map_.end()) {
LOG(ERROR) << StringPrintf("No data map entry found @ %x; expected %x", offset, type);
return false;
diff --git a/runtime/gc/accounting/gc_allocator.cc b/runtime/gc/accounting/gc_allocator.cc
index 0b0d3ed..11d0e67 100644
--- a/runtime/gc/accounting/gc_allocator.cc
+++ b/runtime/gc/accounting/gc_allocator.cc
@@ -31,6 +31,6 @@ namespace accounting {
Runtime::Current()->GetHeap()->RegisterGCDeAllocation(bytes);
free(p);
}
-}
-}
-}
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/accounting/gc_allocator.h b/runtime/gc/accounting/gc_allocator.h
index d1356a7..1fba858 100644
--- a/runtime/gc/accounting/gc_allocator.h
+++ b/runtime/gc/accounting/gc_allocator.h
@@ -75,8 +75,8 @@ namespace accounting {
GCAllocatorImpl<T>,
std::allocator<T> >::value {
};
-}
-}
-}
+} // namespace accounting
+} // namespace gc
+} // namespace art
#endif // ART_RUNTIME_GC_ACCOUNTING_GC_ALLOCATOR_H_
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 0524ccb..18b93d4 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -25,20 +25,12 @@ namespace accounting {
template <typename Visitor>
inline void HeapBitmap::Visit(const Visitor& visitor) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It2;
- DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
- for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- set->Visit(visitor);
+ DCHECK(!discontinuous_space_sets_.empty());
+ for (const auto& space_set : discontinuous_space_sets_) {
+ space_set->Visit(visitor);
}
}
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index 0462905..5589461 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -23,12 +23,9 @@ namespace gc {
namespace accounting {
void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- if (*it == old_bitmap) {
- *it = new_bitmap;
+ for (auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap == old_bitmap) {
+ bitmap = new_bitmap;
return;
}
}
@@ -36,12 +33,9 @@ void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
}
void HeapBitmap::ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set) {
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It;
- for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- if (*it == old_set) {
- *it = new_set;
+ for (auto& space_set : discontinuous_space_sets_) {
+ if (space_set == old_set) {
+ space_set = new_set;
return;
}
}
@@ -52,13 +46,10 @@ void HeapBitmap::AddContinuousSpaceBitmap(accounting::SpaceBitmap* bitmap) {
DCHECK(bitmap != NULL);
// Check for interval overlap.
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
- SpaceBitmap* cur_bitmap = *it;
- CHECK(bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
- bitmap->HeapLimit() > cur_bitmap->HeapBegin())
+ for (const auto& cur_bitmap : continuous_space_bitmaps_) {
+ CHECK(!(
+ bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
+ bitmap->HeapLimit() > cur_bitmap->HeapBegin()))
<< "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
}
continuous_space_bitmaps_.push_back(bitmap);
@@ -70,20 +61,13 @@ void HeapBitmap::AddDiscontinuousObjectSet(SpaceSetMap* set) {
}
void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->Walk(callback, arg);
}
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It2;
- DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
- for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- set->Walk(callback, arg);
+
+ DCHECK(!discontinuous_space_sets_.empty());
+ for (const auto& space_set : discontinuous_space_sets_) {
+ space_set->Walk(callback, arg);
}
}
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index ada976f..2ca8c4a 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -66,11 +66,7 @@ class HeapBitmap {
}
SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
- // TODO: C++0x auto
- typedef SpaceBitmapVector::iterator It;
- for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
- it != end; ++it) {
- SpaceBitmap* bitmap = *it;
+ for (const auto& bitmap : continuous_space_bitmaps_) {
if (bitmap->HasAddress(obj)) {
return bitmap;
}
@@ -79,13 +75,9 @@ class HeapBitmap {
}
SpaceSetMap* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
- // TODO: C++0x auto
- typedef SpaceSetMapVector::iterator It;
- for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
- it != end; ++it) {
- SpaceSetMap* set = *it;
- if (set->Test(obj)) {
- return set;
+ for (const auto& space_set : discontinuous_space_sets_) {
+ if (space_set->Test(obj)) {
+ return space_set;
}
}
return NULL;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 3bbc381..4865219 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -36,54 +36,6 @@ namespace art {
namespace gc {
namespace accounting {
-class MarkIfReachesAllocspaceVisitor {
- public:
- explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- // Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
- bool /* is_static */) const {
- // TODO: Optimize?
- // TODO: C++0x auto
- const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsDlMallocSpace() && (*cur)->Contains(ref)) {
- bitmap_->Set(obj);
- break;
- }
- }
- }
-
- private:
- Heap* const heap_;
- accounting::SpaceBitmap* const bitmap_;
-};
-
-class ModUnionVisitor {
- public:
- explicit ModUnionVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
- : heap_(heap),
- bitmap_(bitmap) {
- }
-
- void operator()(const Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- // We don't have an early exit since we use the visitor pattern, an early exit should
- // significantly speed this up.
- MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
- collector::MarkSweep::VisitObjectReferences(obj, visitor);
- }
- private:
- Heap* const heap_;
- accounting::SpaceBitmap* const bitmap_;
-};
-
class ModUnionClearCardSetVisitor {
public:
explicit ModUnionClearCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
@@ -237,29 +189,23 @@ class ModUnionCheckReferences {
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
Heap* heap = GetHeap();
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
- typedef std::vector<const Object*>::const_iterator It2;
- for (It it = references_.begin(), end = references_.end(); it != end; ++it) {
- for (It2 it_ref = it->second.begin(), end_ref = it->second.end(); it_ref != end_ref;
- ++it_ref ) {
- CHECK(heap->IsLiveObjectLocked(*it_ref));
+ for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
+ for (const Object* ref : it.second) {
+ CHECK(heap->IsLiveObjectLocked(ref));
}
}
// Check the references of each clean card which is also in the mod union table.
CardTable* card_table = heap->GetCardTable();
- for (It it = references_.begin(); it != references_.end(); ++it) {
- const byte* card = &*it->first;
+ for (const std::pair<const byte*, std::vector<const Object*> > & it : references_) {
+ const byte* card = it.first;
if (*card == CardTable::kCardClean) {
- std::set<const Object*> reference_set;
- for (It2 itr = it->second.begin(); itr != it->second.end(); ++itr) {
- reference_set.insert(*itr);
- }
+ std::set<const Object*> reference_set(it.second.begin(), it.second.end());
ModUnionCheckReferences visitor(this, reference_set);
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- space::ContinuousSpace* space =
- heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ DCHECK(space != nullptr);
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor);
}
@@ -268,24 +214,20 @@ void ModUnionTableReferenceCache::Verify() {
void ModUnionTableReferenceCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
- typedef std::set<byte*>::const_iterator It;
os << "ModUnionTable cleared cards: [";
- for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ for (byte* card_addr : cleared_cards_) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]\nModUnionTable references: [";
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It2;
- for (It2 it = references_.begin(); it != references_.end(); ++it) {
- const byte* card = &*it->first;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+ for (const std::pair<const byte*, std::vector<const Object*> >& it : references_) {
+ const byte* card_addr = it.first;
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
- typedef std::vector<const Object*>::const_iterator It3;
- for (It3 itr = it->second.begin(); itr != it->second.end(); ++itr) {
- os << reinterpret_cast<const void*>(*itr) << ",";
+ for (const mirror::Object* ref : it.second) {
+ os << reinterpret_cast<const void*>(ref) << ",";
}
os << "},";
}
@@ -298,20 +240,18 @@ void ModUnionTableReferenceCache::Update() {
std::vector<const Object*> cards_references;
ModUnionReferenceVisitor visitor(this, &cards_references);
- typedef std::set<byte*>::iterator It;
- for (It it = cleared_cards_.begin(), cc_end = cleared_cards_.end(); it != cc_end; ++it) {
- byte* card = *it;
+ for (const auto& card : cleared_cards_) {
// Clear and re-compute alloc space references associated with this card.
cards_references.clear();
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
- SpaceBitmap* live_bitmap =
- heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false)->GetLiveBitmap();
+ auto* space = heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ DCHECK(space != nullptr);
+ SpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, visitor);
// Update the corresponding references for the card.
- // TODO: C++0x auto
- SafeMap<const byte*, std::vector<const Object*> >::iterator found = references_.find(card);
+ auto found = references_.find(card);
if (found == references_.end()) {
if (cards_references.empty()) {
// No reason to add empty array.
@@ -326,14 +266,11 @@ void ModUnionTableReferenceCache::Update() {
}
void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_sweep) {
- // TODO: C++0x auto
size_t count = 0;
- typedef SafeMap<const byte*, std::vector<const Object*> >::const_iterator It;
- for (It it = references_.begin(); it != references_.end(); ++it) {
- typedef std::vector<const Object*>::const_iterator It2;
- for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) {
- mark_sweep->MarkRoot(*it_ref);
+ for (const auto& ref : references_) {
+ for (const auto& obj : ref.second) {
+ mark_sweep->MarkRoot(obj);
++count;
}
}
@@ -353,38 +290,28 @@ void ModUnionTableCardCache::ClearCards(space::ContinuousSpace* space) {
void ModUnionTableCardCache::MarkReferences(collector::MarkSweep* mark_sweep) {
CardTable* card_table = heap_->GetCardTable();
ModUnionScanImageRootVisitor visitor(mark_sweep);
- typedef std::set<byte*>::const_iterator It;
- It it = cleared_cards_.begin();
- It cc_end = cleared_cards_.end();
- if (it != cc_end) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
- space::ContinuousSpace* cur_space =
- heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
- accounting::SpaceBitmap* cur_live_bitmap = cur_space->GetLiveBitmap();
- cur_live_bitmap->VisitMarkedRange(start, end, visitor);
- for (++it; it != cc_end; ++it) {
- card = *it;
- start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- end = start + CardTable::kCardSize;
- if (UNLIKELY(!cur_space->Contains(reinterpret_cast<Object*>(start)))) {
- cur_space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
- cur_live_bitmap = cur_space->GetLiveBitmap();
- }
- cur_live_bitmap->VisitMarkedRange(start, end, visitor);
+ space::ContinuousSpace* space = nullptr;
+ SpaceBitmap* bitmap = nullptr;
+ for (const byte* card_addr : cleared_cards_) {
+ auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ auto end = start + CardTable::kCardSize;
+ auto obj_start = reinterpret_cast<Object*>(start);
+ if (UNLIKELY(space == nullptr || !space->Contains(obj_start))) {
+ space = heap_->FindContinuousSpaceFromObject(obj_start, false);
+ DCHECK(space != nullptr);
+ bitmap = space->GetLiveBitmap();
+ DCHECK(bitmap != nullptr);
}
+ bitmap->VisitMarkedRange(start, end, visitor);
}
}
void ModUnionTableCardCache::Dump(std::ostream& os) {
CardTable* card_table = heap_->GetCardTable();
- typedef std::set<byte*>::const_iterator It;
os << "ModUnionTable dirty cards: [";
- for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
- byte* card = *it;
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
- uintptr_t end = start + CardTable::kCardSize;
+ for (const byte* card_addr : cleared_cards_) {
+ auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ auto end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
}
os << "]";
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 378a971..9260137 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -114,11 +114,7 @@ void GarbageCollector::SwapBitmaps() {
// these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
// bits of dead objects in the live bitmap.
const GcType gc_type = GetGcType();
- const std::vector<space::ContinuousSpace*>& cont_spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = cont_spaces.begin(), end = cont_spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
// We never allocate into zygote spaces.
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
(gc_type == kGcTypeFull &&
@@ -132,11 +128,8 @@ void GarbageCollector::SwapBitmaps() {
}
}
}
- const std::vector<space::DiscontinuousSpace*>& disc_spaces = GetHeap()->GetDiscontinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = disc_spaces.begin(), end = disc_spaces.end(); it != end; ++it) {
- space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(*it);
+ for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
+ space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
accounting::SpaceSetMap* live_set = space->GetLiveObjects();
accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 61570ae..e93bcd1 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -84,16 +84,13 @@ void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
reinterpret_cast<Object*>(space->End()));
} else {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- const space::ContinuousSpace* prev_space = NULL;
+ const space::ContinuousSpace* prev_space = nullptr;
// Find out if the previous space is immune.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if (*it == space) {
+ for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+ if (cur_space == space) {
break;
}
- prev_space = *it;
+ prev_space = cur_space;
}
// If previous space was immune, then extend the immune region. Relies on continuous spaces
@@ -322,13 +319,9 @@ void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
void MarkSweep::FindDefaultMarkBitmap() {
base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
- current_mark_bitmap_ = (*it)->GetMarkBitmap();
+ current_mark_bitmap_ = space->GetMarkBitmap();
CHECK(current_mark_bitmap_ != NULL);
return;
}
@@ -344,11 +337,10 @@ void MarkSweep::ExpandMarkStack() {
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
- std::vector<Object*> temp;
- temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
+ std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
mark_stack_->Resize(mark_stack_->Capacity() * 2);
- for (size_t i = 0; i < temp.size(); ++i) {
- mark_stack_->PushBack(temp[i]);
+ for (const auto& obj : temp) {
+ mark_stack_->PushBack(obj);
}
}
@@ -608,12 +600,8 @@ class ScanObjectVisitor {
void MarkSweep::ScanGrayObjects(byte minimum_age) {
accounting::CardTable* card_table = GetHeap()->GetCardTable();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
ScanObjectVisitor visitor(this);
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
switch (space->GetGcRetentionPolicy()) {
case space::kGcRetentionPolicyNeverCollect:
timings_.StartSplit("ScanGrayImageSpaceObjects");
@@ -656,15 +644,12 @@ void MarkSweep::VerifyImageRoots() {
// space
timings_.StartSplit("VerifyImageRoots");
CheckBitmapVisitor visitor(this);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
- space::ImageSpace* space = (*it)->AsImageSpace();
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
+ space::ImageSpace* image_space = space->AsImageSpace();
+ uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
+ accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
DCHECK(live_bitmap != NULL);
live_bitmap->VisitMarkedRange(begin, end, visitor);
}
@@ -687,11 +672,7 @@ void MarkSweep::RecursiveMark() {
const bool partial = GetGcType() == kGcTypePartial;
ScanObjectVisitor scan_visitor(this);
if (!kDisableFinger) {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
(!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
current_mark_bitmap_ = space->GetMarkBitmap();
@@ -729,10 +710,7 @@ void MarkSweep::ReMarkRoots() {
void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
- IndirectReferenceTable* table = &vm->weak_globals;
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = table->begin(), end = table->end(); it != end; ++it) {
- const Object** entry = *it;
+ for (const Object** entry : vm->weak_globals) {
if (!is_marked(*entry, arg)) {
*entry = kClearedJniWeakGlobal;
}
@@ -815,10 +793,7 @@ void MarkSweep::VerifySystemWeaks() {
JavaVMExt* vm = runtime->GetJavaVM();
MutexLock mu(Thread::Current(), vm->weak_globals_lock);
- IndirectReferenceTable* table = &vm->weak_globals;
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = table->begin(), end = table->end(); it != end; ++it) {
- const Object** entry = *it;
+ for (const Object** entry : vm->weak_globals) {
VerifyIsLive(*entry);
}
}
@@ -988,11 +963,7 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
SweepCallbackContext scc;
scc.mark_sweep = this;
scc.self = Thread::Current();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
// We always sweep always collect spaces.
bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
if (!partial && !sweep_space) {
@@ -1040,11 +1011,9 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
size_t freed_objects = 0;
size_t freed_bytes = 0;
Thread* self = Thread::Current();
- // TODO: C++0x
- typedef accounting::SpaceSetMap::Objects::iterator It;
- for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
- if (!large_mark_objects->Test(*it)) {
- freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
+ for (const Object* obj : live_objects) {
+ if (!large_mark_objects->Test(obj)) {
+ freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
++freed_objects;
}
}
@@ -1054,11 +1023,7 @@ void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
}
void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace() && space->Contains(ref)) {
DCHECK(IsMarked(obj));
@@ -1508,11 +1473,7 @@ void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
void MarkSweep::UnBindBitmaps() {
base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
if (alloc_space->temp_bitmap_.get() != NULL) {
@@ -1585,11 +1546,7 @@ void MarkSweep::FinishPhase() {
cumulative_timings_.End();
// Clear all of the spaces' mark bitmaps.
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
space->GetMarkBitmap()->Clear();
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index e39e2f7..8db03d3 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -428,6 +428,7 @@ class MarkSweep : public GarbageCollector {
bool clear_soft_references_;
+ private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CheckBitmapVisitor;
friend class CheckObjectVisitor;
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index ef893c5..cc3cfe5 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -33,14 +33,10 @@ PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::st
void PartialMarkSweep::BindBitmaps() {
MarkSweep::BindBitmaps();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
// zygote space are viewed as marked.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
CHECK(space->IsZygoteSpace());
ImmuneSpace(space);
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 25304b9..3b788f4 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -38,6 +38,7 @@ class PartialMarkSweep : public MarkSweep {
// collections, ie the Zygote space. Also mark this space is immune.
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ private:
DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
};
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index aad7c29..008d3e0 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -33,15 +33,11 @@ StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::stri
void StickyMarkSweep::BindBitmaps() {
PartialMarkSweep::BindBitmaps();
- const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
// know what was allocated since the last GC. A side-effect of binding the allocation space mark
// and live bitmap is that marking the objects will place them in the live bitmap.
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
BindLiveToMarkBitmap(space);
}
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index e009b62..2099c79 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -45,6 +45,7 @@ class StickyMarkSweep : public PartialMarkSweep {
void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ private:
DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
};
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a2453b8..d5a8d75 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -183,11 +183,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
}
- // Mark image objects in the live bitmap
- // TODO: C++0x
- typedef std::vector<space::ContinuousSpace*>::iterator It;
- for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
- space::ContinuousSpace* space = *it;
+ // Mark image objects in the live bitmap.
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
space::ImageSpace* image_space = space->AsImageSpace();
image_space->RecordImageAllocations(image_space->GetLiveBitmap());
@@ -393,9 +390,7 @@ void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
// Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
// avoid redundant marking.
bool seen_zygote = false, seen_alloc = false;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
DCHECK(!seen_zygote);
DCHECK(!seen_alloc);
@@ -436,17 +431,13 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
uint64_t total_duration = 0;
// Dump cumulative loggers for each GC type.
- // TODO: C++0x
uint64_t total_paused_time = 0;
- typedef std::vector<collector::MarkSweep*>::const_iterator It;
- for (It it = mark_sweep_collectors_.begin();
- it != mark_sweep_collectors_.end(); ++it) {
- collector::MarkSweep* collector = *it;
+ for (const auto& collector : mark_sweep_collectors_) {
CumulativeLogger& logger = collector->GetCumulativeTimings();
if (logger.GetTotalNs() != 0) {
os << Dumpable<CumulativeLogger>(logger);
const uint64_t total_ns = logger.GetTotalNs();
- const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
+ const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
const uint64_t freed_bytes = collector->GetTotalFreedBytes();
const uint64_t freed_objects = collector->GetTotalFreedObjects();
@@ -507,11 +498,9 @@ Heap::~Heap() {
space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
bool fail_ok) const {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- if ((*it)->Contains(obj)) {
- return *it;
+ for (const auto& space : continuous_spaces_) {
+ if (space->Contains(obj)) {
+ return space;
}
}
if (!fail_ok) {
@@ -522,11 +511,9 @@ space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
bool fail_ok) const {
- // TODO: C++0x auto
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
- for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- if ((*it)->Contains(obj)) {
- return *it;
+ for (const auto& space : discontinuous_spaces_) {
+ if (space->Contains(obj)) {
+ return space;
}
}
if (!fail_ok) {
@@ -544,11 +531,9 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
}
space::ImageSpace* Heap::GetImageSpace() const {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
- return (*it)->AsImageSpace();
+ for (const auto& space : continuous_spaces_) {
+ if (space->IsImageSpace()) {
+ return space->AsImageSpace();
}
}
return NULL;
@@ -627,10 +612,7 @@ mirror::Object* Heap::AllocObject(Thread* self, mirror::Class* c, size_t byte_co
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (!large_object_allocation && total_bytes_free >= byte_count) {
size_t max_contiguous_allocation = 0;
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsDlMallocSpace()) {
space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
}
@@ -706,19 +688,14 @@ void Heap::VerifyObjectImpl(const mirror::Object* obj) {
}
void Heap::DumpSpaces() {
- // TODO: C++0x auto
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
LOG(INFO) << space << " " << *space << "\n"
<< live_bitmap << " " << *live_bitmap << "\n"
<< mark_bitmap << " " << *mark_bitmap;
}
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
+ for (const auto& space : discontinuous_spaces_) {
LOG(INFO) << space << " " << *space << "\n";
}
}
@@ -1143,11 +1120,8 @@ void Heap::PreZygoteFork() {
have_zygote_space_ = true;
// Reset the cumulative loggers since we now have a few additional timing phases.
- // TODO: C++0x
- typedef std::vector<collector::MarkSweep*>::const_iterator It;
- for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
- it != end; ++it) {
- (*it)->ResetCumulativeStatistics();
+ for (const auto& collector : mark_sweep_collectors_) {
+ collector->ResetCumulativeStatistics();
}
}
@@ -1238,10 +1212,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]);
collector::MarkSweep* collector = NULL;
- typedef std::vector<collector::MarkSweep*>::iterator It;
- for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
- it != end; ++it) {
- collector::MarkSweep* cur_collector = *it;
+ for (const auto& cur_collector : mark_sweep_collectors_) {
if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
collector = cur_collector;
break;
@@ -1596,9 +1567,7 @@ void Heap::SwapStacks() {
void Heap::ProcessCards(base::TimingLogger& timings) {
// Clear cards and keep track of cards cleared in the mod-union table.
- typedef std::vector<space::ContinuousSpace*>::iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
base::TimingLogger::ScopedSplit split("ImageModUnionClearCards", &timings);
image_mod_union_table_->ClearCards(space);
@@ -2085,9 +2054,7 @@ void Heap::RegisterNativeFree(int bytes) {
int64_t Heap::GetTotalMemory() const {
int64_t ret = 0;
- typedef std::vector<space::ContinuousSpace*>::const_iterator It;
- for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
- space::ContinuousSpace* space = *it;
+ for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace()) {
// Currently don't include the image space.
} else if (space->IsDlMallocSpace()) {
@@ -2095,9 +2062,7 @@ int64_t Heap::GetTotalMemory() const {
ret += space->AsDlMallocSpace()->GetFootprint();
}
}
- typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
- for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
- space::DiscontinuousSpace* space = *it;
+ for (const auto& space : discontinuous_spaces_) {
if (space->IsLargeObjectSpace()) {
ret += space->AsLargeObjectSpace()->GetBytesAllocated();
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 3e75716..8af4d7e 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -309,9 +309,8 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
}
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- typedef IndirectReferenceTable::iterator It; // TODO: C++0x auto
- for (It it = begin(), end = this->end(); it != end; ++it) {
- visitor(**it, arg);
+ for (auto ref : *this) {
+ visitor(*ref, arg);
}
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 97706b8..26f53db 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -248,8 +248,6 @@ bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
class IndirectReferenceTable {
public:
- typedef IrtIterator iterator;
-
IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind);
~IndirectReferenceTable();
@@ -301,12 +299,12 @@ class IndirectReferenceTable {
return segment_state_.parts.topIndex;
}
- iterator begin() {
- return iterator(table_, 0, Capacity());
+ IrtIterator begin() {
+ return IrtIterator(table_, 0, Capacity());
}
- iterator end() {
- return iterator(table_, Capacity(), Capacity());
+ IrtIterator end() {
+ return IrtIterator(table_, Capacity(), Capacity());
}
void VisitRoots(RootVisitor* visitor, void* arg);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ae3a165..6caad01 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -206,12 +206,9 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
}
return true; // Ignore upcalls.
}
- typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It; // TODO: C++0x auto
bool removed_stub = false;
// TODO: make this search more efficient?
- for (It it = instrumentation_stack_->begin(), end = instrumentation_stack_->end(); it != end;
- ++it) {
- InstrumentationStackFrame instrumentation_frame = *it;
+ for (InstrumentationStackFrame instrumentation_frame : *instrumentation_stack_) {
if (instrumentation_frame.frame_id_ == GetFrameId()) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Removing exit stub in " << DescribeLocation();
@@ -407,8 +404,7 @@ const void* Instrumentation::GetQuickCodeFor(const mirror::ArtMethod* method) co
void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
const mirror::ArtMethod* method,
uint32_t dex_pc) const {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- It it = method_entry_listeners_.begin();
+ auto it = method_entry_listeners_.begin();
bool is_end = (it == method_entry_listeners_.end());
// Implemented this way to prevent problems caused by modification of the list while iterating.
while (!is_end) {
@@ -422,8 +418,7 @@ void Instrumentation::MethodEnterEventImpl(Thread* thread, mirror::Object* this_
void Instrumentation::MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
const mirror::ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- It it = method_exit_listeners_.begin();
+ auto it = method_exit_listeners_.begin();
bool is_end = (it == method_exit_listeners_.end());
// Implemented this way to prevent problems caused by modification of the list while iterating.
while (!is_end) {
@@ -438,10 +433,8 @@ void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_obj
const mirror::ArtMethod* method,
uint32_t dex_pc) const {
if (have_method_unwind_listeners_) {
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = method_unwind_listeners_.begin(), end = method_unwind_listeners_.end(); it != end;
- ++it) {
- (*it)->MethodUnwind(thread, method, dex_pc);
+ for (InstrumentationListener* listener : method_unwind_listeners_) {
+ listener->MethodUnwind(thread, method, dex_pc);
}
}
}
@@ -454,9 +447,8 @@ void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_o
// around the problem and in general we may have to move to something like reference counting to
// ensure listeners are deleted correctly.
std::list<InstrumentationListener*> copy(dex_pc_listeners_);
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = copy.begin(), end = copy.end(); it != end; ++it) {
- (*it)->DexPcMoved(thread, this_object, method, dex_pc);
+ for (InstrumentationListener* listener : copy) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
}
}
@@ -467,10 +459,8 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation&
if (have_exception_caught_listeners_) {
DCHECK_EQ(thread->GetException(NULL), exception_object);
thread->ClearException();
- typedef std::list<InstrumentationListener*>::const_iterator It; // TODO: C++0x auto
- for (It it = exception_caught_listeners_.begin(), end = exception_caught_listeners_.end();
- it != end; ++it) {
- (*it)->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ for (InstrumentationListener* listener : exception_caught_listeners_) {
+ listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
}
thread->SetException(throw_location, exception_object);
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index e2c1a64..d7398ca 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -40,9 +40,8 @@ void InternTable::DumpForSigQuit(std::ostream& os) const {
void InternTable::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
- visitor(it->second, arg);
+ for (const auto& strong_intern : strong_interns_) {
+ visitor(strong_intern.second, arg);
}
if (clean_dirty) {
is_dirty_ = false;
@@ -52,8 +51,7 @@ void InternTable::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty)
mirror::String* InternTable::Lookup(Table& table, mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
mirror::String* existing_string = it->second;
if (existing_string->Equals(s)) {
return existing_string;
@@ -75,8 +73,7 @@ void InternTable::RegisterStrong(mirror::String* s) {
void InternTable::Remove(Table& table, const mirror::String* s, uint32_t hash_code) {
intern_table_lock_.AssertHeld(Thread::Current());
- typedef Table::iterator It; // TODO: C++0x auto
- for (It it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
if (it->second == s) {
table.erase(it);
return;
@@ -166,8 +163,8 @@ bool InternTable::ContainsWeak(mirror::String* s) {
void InternTable::SweepInternTableWeaks(IsMarkedTester is_marked, void* arg) {
MutexLock mu(Thread::Current(), intern_table_lock_);
- typedef Table::iterator It; // TODO: C++0x auto
- for (It it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
+ // TODO: std::remove_if + lambda.
+ for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
mirror::Object* object = it->second;
if (!is_marked(object, arg)) {
weak_interns_.erase(it++);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index ffb93eb..d79d2c4 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -58,8 +58,7 @@ class TestPredicate {
public:
bool IsMarked(const mirror::Object* s) const {
bool erased = false;
- typedef std::vector<const mirror::String*>::iterator It; // TODO: C++0x auto
- for (It it = expected_.begin(), end = expected_.end(); it != end; ++it) {
+ for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
expected_.erase(it);
erased = true;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 852dd00..460e3b0 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -538,12 +538,12 @@ class Libraries {
void Dump(std::ostream& os) const {
bool first = true;
- for (It it = libraries_.begin(); it != libraries_.end(); ++it) {
+ for (const auto& library : libraries_) {
if (!first) {
os << ' ';
}
first = false;
- os << it->first;
+ os << library.first;
}
}
@@ -552,7 +552,7 @@ class Libraries {
}
SharedLibrary* Get(const std::string& path) {
- It it = libraries_.find(path);
+ auto it = libraries_.find(path);
return (it == libraries_.end()) ? NULL : it->second;
}
@@ -566,8 +566,8 @@ class Libraries {
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
const ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
- for (It it = libraries_.begin(); it != libraries_.end(); ++it) {
- SharedLibrary* library = it->second;
+ for (const auto& lib : libraries_) {
+ SharedLibrary* library = lib.second;
if (library->GetClassLoader() != declaring_class_loader) {
// We only search libraries loaded by the appropriate ClassLoader.
continue;
@@ -591,8 +591,6 @@ class Libraries {
}
private:
- typedef SafeMap<std::string, SharedLibrary*>::const_iterator It; // TODO: C++0x auto
-
SafeMap<std::string, SharedLibrary*> libraries_;
};
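
In the Libraries hunks above, the removed It typedef is replaced by auto, both for the range-based loops in Dump()/FindNativeMethod() and for the find() in Get(). A rough sketch of the same shapes on a plain std::map (SharedLibrary and the container here are stand-ins, not the real SafeMap definitions):

#include <map>
#include <ostream>
#include <string>

struct SharedLibrary {};
using Libraries = std::map<std::string, SharedLibrary*>;

// find() yields an iterator whose type is deduced by auto; end() means the
// path was never registered, so the caller gets a null library.
SharedLibrary* Get(const Libraries& libraries, const std::string& path) {
  auto it = libraries.find(path);
  return (it == libraries.end()) ? nullptr : it->second;
}

// Range-based iteration over a map visits std::pair<const std::string, SharedLibrary*>,
// so .first is the path, mirroring the Dump() hunk above.
void DumpPaths(const Libraries& libraries, std::ostream& os) {
  bool first = true;
  for (const auto& library : libraries) {
    if (!first) {
      os << ' ';
    }
    first = false;
    os << library.first;
  }
}
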
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 7301f23..5d4a6ea 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -442,6 +442,7 @@ class MANAGED ArtMethod : public Object {
static Class* java_lang_reflect_ArtMethod_;
+ private:
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 48c0569..ff193c9 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -979,9 +979,7 @@ void MonitorList::Add(Monitor* m) {
void MonitorList::SweepMonitorList(IsMarkedTester is_marked, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
- typedef std::list<Monitor*>::iterator It; // TODO: C++0x auto
- It it = list_.begin();
- while (it != list_.end()) {
+ for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
if (!is_marked(m->GetObject(), arg)) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << m->GetObject();
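
For the std::list used by MonitorList, the explicit erase loop could in principle be collapsed further: std::list has a member remove_if that takes a predicate, so a lambda-based sweep becomes possible once C++11 is in use. The commit keeps the explicit loop, which still needs to log and free each monitor before removing it; the following is only an illustrative alternative with hypothetical Monitor/IsDead stand-ins:

#include <list>

struct Monitor { bool dead; bool IsDead() const { return dead; } };

// list::remove_if drops every element for which the lambda returns true.
// Unlike the explicit loop above, this gives no chance to log or delete the
// monitor before its pointer is removed from the list.
void SweepMonitors(std::list<Monitor*>& monitors) {
  monitors.remove_if([](Monitor* m) { return m->IsDead(); });
}
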
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 061dfb8..4ee3533 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -253,14 +253,10 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename
return JNI_TRUE;
}
- gc::Heap* heap = runtime->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
- // TODO: C++0x auto
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
- if ((*it)->IsImageSpace()) {
+ for (const auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+ if (space->IsImageSpace()) {
// TODO: Ensure this works with multiple image spaces.
- const ImageHeader& image_header = (*it)->AsImageSpace()->GetImageHeader();
+ const ImageHeader& image_header = space->AsImageSpace()->GetImageHeader();
if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() != image_header.GetOatChecksum()) {
ScopedObjectAccess soa(env);
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
diff --git a/runtime/output_stream_test.cc b/runtime/output_stream_test.cc
index 8da2ac9..d5e9755 100644
--- a/runtime/output_stream_test.cc
+++ b/runtime/output_stream_test.cc
@@ -78,4 +78,4 @@ TEST_F(OutputStreamTest, Vector) {
CheckTestOutput(output);
}
-} // namespace std
+} // namespace art
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index de64c26..8e23cbb 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -232,9 +232,8 @@ void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
}
void ReferenceTable::VisitRoots(RootVisitor* visitor, void* arg) {
- typedef Table::const_iterator It; // TODO: C++0x auto
- for (It it = entries_.begin(), end = entries_.end(); it != end; ++it) {
- visitor(*it, arg);
+ for (const auto& ref : entries_) {
+ visitor(ref, arg);
}
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9c28c87..671924a 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -53,8 +53,8 @@ bool ThreadList::Contains(Thread* thread) {
}
bool ThreadList::Contains(pid_t tid) {
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- if ((*it)->tid_ == tid) {
+ for (const auto& thread : list_) {
+ if (thread->tid_ == tid) {
return true;
}
}
@@ -113,8 +113,8 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) {
void ThreadList::DumpLocked(std::ostream& os) {
os << "DALVIK THREADS (" << list_.size() << "):\n";
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->Dump(os);
+ for (const auto& thread : list_) {
+ thread->Dump(os);
os << "\n";
}
}
@@ -122,8 +122,7 @@ void ThreadList::DumpLocked(std::ostream& os) {
void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != ignore1 && thread != ignore2) {
CHECK(thread->IsSuspended())
<< "\nUnsuspended thread: <<" << *thread << "\n"
@@ -160,9 +159,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
 // Call a checkpoint function for each thread; threads which are suspended get their checkpoint
 // manually called.
MutexLock mu(self, *Locks::thread_list_lock_);
- // TODO: C++0x auto.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self) {
for (;;) {
if (thread->RequestCheckpoint(checkpoint_function)) {
@@ -189,8 +186,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
checkpoint_function->Run(self);
// Run the checkpoint on the suspended threads.
- for (size_t i = 0; i < suspended_count_modified_threads.size(); ++i) {
- Thread* thread = suspended_count_modified_threads[i];
+ for (const auto& thread : suspended_count_modified_threads) {
if (!thread->IsSuspended()) {
// Wait until the thread is suspended.
uint64_t start = NanoTime();
@@ -243,8 +239,7 @@ void ThreadList::SuspendAll() {
// Update global suspend all state for attaching threads.
++suspend_all_count_;
// Increment everybody's suspend count (except our own).
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self) {
continue;
}
@@ -285,8 +280,7 @@ void ThreadList::ResumeAll() {
// Update global suspend all state for attaching threads.
--suspend_all_count_;
// Decrement the suspend counts for all threads.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self) {
continue;
}
@@ -341,8 +335,7 @@ void ThreadList::SuspendAllForDebugger() {
++suspend_all_count_;
++debug_suspend_all_count_;
// Increment everybody's suspend count (except our own).
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self || thread == debug_thread) {
continue;
}
@@ -427,8 +420,7 @@ void ThreadList::UndoDebuggerSuspensions() {
suspend_all_count_ -= debug_suspend_all_count_;
debug_suspend_all_count_ = 0;
// Update running threads.
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread == self || thread->debug_suspend_count_ == 0) {
continue;
}
@@ -457,8 +449,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
}
all_threads_are_daemons = true;
MutexLock mu(self, *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self && !thread->IsDaemon()) {
all_threads_are_daemons = false;
break;
@@ -476,8 +467,7 @@ void ThreadList::SuspendAllDaemonThreads() {
MutexLock mu(self, *Locks::thread_list_lock_);
{ // Tell all the daemons it's time to suspend.
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
// This is only run after all non-daemon threads have exited, so the remainder should all be
// daemons.
CHECK(thread->IsDaemon()) << *thread;
@@ -491,8 +481,7 @@ void ThreadList::SuspendAllDaemonThreads() {
for (int i = 0; i < 10; ++i) {
usleep(200 * 1000);
bool all_suspended = true;
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- Thread* thread = *it;
+ for (const auto& thread : list_) {
if (thread != self && thread->GetState() == kRunnable) {
if (!have_complained) {
LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
@@ -567,22 +556,22 @@ void ThreadList::Unregister(Thread* self) {
}
void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- callback(*it, context);
+ for (const auto& thread : list_) {
+ callback(thread, context);
}
}
void ThreadList::VisitRoots(RootVisitor* visitor, void* arg) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->VisitRoots(visitor, arg);
+ for (const auto& thread : list_) {
+ thread->VisitRoots(visitor, arg);
}
}
void ThreadList::VerifyRoots(VerifyRootVisitor* visitor, void* arg) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- (*it)->VerifyRoots(visitor, arg);
+ for (const auto& thread : list_) {
+ thread->VerifyRoots(visitor, arg);
}
}
@@ -607,9 +596,9 @@ void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
Thread* ThreadList::FindThreadByThinLockId(uint32_t thin_lock_id) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (It it = list_.begin(), end = list_.end(); it != end; ++it) {
- if ((*it)->GetThinLockId() == thin_lock_id) {
- return *it;
+ for (const auto& thread : list_) {
+ if (thread->GetThinLockId() == thin_lock_id) {
+ return thread;
}
}
return NULL;
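
All of the thread_list.cc hunks above follow the same mechanical rewrite: an explicit iterator loop over std::list<Thread*> becomes a range-based for whose loop variable binds to the stored pointer. A tiny self-contained sketch of that shape (Thread here is a placeholder struct, not the runtime class):

#include <iostream>
#include <list>

struct Thread { int tid; };

// With std::list<Thread*>, const auto& deduces to Thread* const&, so the loop
// body dereferences a pointer exactly as the old (*it)-> form did.
void DumpTids(const std::list<Thread*>& list) {
  for (const auto& thread : list) {
    std::cout << thread->tid << '\n';
  }
}
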
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index d95f191..3df3e2c 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -102,8 +102,6 @@ class ThreadList {
Thread* FindThreadByThinLockId(uint32_t thin_lock_id);
private:
- typedef std::list<Thread*>::const_iterator It; // TODO: C++0x auto
-
uint32_t AllocThreadId(Thread* self);
void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c26926c..9c6d47b 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -55,6 +55,7 @@ class ThreadPoolWorker {
const size_t stack_size_;
pthread_t pthread_;
+ private:
friend class ThreadPool;
DISALLOW_COPY_AND_ASSIGN(ThreadPoolWorker);
};
@@ -117,6 +118,7 @@ class ThreadPool {
uint64_t total_wait_time_;
Barrier creation_barier_;
+ private:
friend class ThreadPoolWorker;
friend class WorkStealingWorker;
DISALLOW_COPY_AND_ASSIGN(ThreadPool);
@@ -153,6 +155,7 @@ class WorkStealingWorker : public ThreadPoolWorker {
WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size);
virtual void Run();
+ private:
friend class WorkStealingThreadPool;
DISALLOW_COPY_AND_ASSIGN(WorkStealingWorker);
};
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 13e2bf6..5d6943c 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -610,12 +610,9 @@ void Trace::GetVisitedMethods(size_t buf_size,
}
}
-void Trace::DumpMethodList(std::ostream& os,
- const std::set<mirror::ArtMethod*>& visited_methods) {
- typedef std::set<mirror::ArtMethod*>::const_iterator It; // TODO: C++0x auto
+void Trace::DumpMethodList(std::ostream& os, const std::set<mirror::ArtMethod*>& visited_methods) {
MethodHelper mh;
- for (It it = visited_methods.begin(); it != visited_methods.end(); ++it) {
- mirror::ArtMethod* method = *it;
+ for (const auto& method : visited_methods) {
mh.ChangeMethod(method);
os << StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
PrettyDescriptor(mh.GetDeclaringClassDescriptor()).c_str(), mh.GetName(),
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 6171943..ff4386e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -122,7 +122,7 @@ class PcToRegisterLineTable {
uint16_t registers_size, MethodVerifier* verifier);
RegisterLine* GetLine(size_t idx) {
- Table::iterator result = pc_to_register_line_.find(idx); // TODO: C++0x auto
+ auto result = pc_to_register_line_.find(idx);
if (result == pc_to_register_line_.end()) {
return NULL;
} else {
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 8418928..25f840c 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -395,8 +395,7 @@ std::string UnresolvedMergedType::Dump() const {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
- typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
- It it = types.begin();
+ auto it = types.begin();
result << reg_type_cache_->GetFromId(*it).Dump();
for (++it; it != types.end(); ++it) {
result << ", ";
@@ -609,9 +608,8 @@ std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
types.insert(refs.second);
}
if (kIsDebugBuild) {
- typedef std::set<uint16_t>::const_iterator It; // TODO: C++0x auto
- for (It it = types.begin(); it != types.end(); ++it) {
- CHECK(!reg_type_cache_->GetFromId(*it).IsUnresolvedMergedReference());
+ for (const auto& type : types) {
+ CHECK(!reg_type_cache_->GetFromId(type).IsUnresolvedMergedReference());
}
}
return types;
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 33f4195..865ba20 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -287,6 +287,7 @@ class RegType {
friend class RegTypeCache;
+ private:
DISALLOW_COPY_AND_ASSIGN(RegType);
};
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 24a626b..5affe47 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -208,9 +208,8 @@ std::string RegisterLine::Dump() const {
result += GetRegisterType(i).Dump();
result += "],";
}
- typedef std::deque<uint32_t>::const_iterator It; // TODO: C++0x auto
- for (It it = monitors_.begin(), end = monitors_.end(); it != end; ++it) {
- result += StringPrintf("{%d},", *it);
+ for (const auto& monitor : monitors_) {
+ result += StringPrintf("{%d},", monitor);
}
return result;
}