author    Mathieu Chartier <mathieuc@google.com>  2014-02-07 12:18:39 -0800
committer Mathieu Chartier <mathieuc@google.com>  2014-02-08 15:23:36 -0800
commit    412c7fced915fc8d4d5e4166e977d55c809168a6 (patch)
tree      b8efee7f46a440f3e89765b1e9b2aa6454839c87
parent    109e2b34799a377a0407781fc32ec1ec607d6c41 (diff)
Make debugger / jdwp compaction safe.
Fixed GetInstances, GetReferringObjects, and CountInstances to use VisitObjects instead of the live bitmap. The object registry is now treated as system weaks, and its objects are updated when/if they move. Also added the recent_allocation_records_ as roots.

Bug: 12936165
Change-Id: I615c289efbf2977ceab5c4ffa73d216d799e6e33
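The core of the change is the ObjectRegistry::UpdateObjectPointers pass in runtime/jdwp/object_registry.cc below: because a moving collector can relocate objects, every object pointer keyed in the registry map is run through the system-weak sweep visitor, which returns the object's new address (or null if it was collected), and the map is rebuilt around the updated keys. The following minimal, self-contained C++ sketch shows that pattern in isolation; the names Object, Entry, RootVisitor, update_pointers, and the main() driver are illustrative stand-ins, not ART code.

// Standalone sketch of a system-weak style pointer update over a registry map.
// Not ART code: Object, Entry, RootVisitor, and update_pointers are invented
// for illustration of the pattern used by ObjectRegistry::UpdateObjectPointers.
#include <cstdint>
#include <iostream>
#include <map>

struct Object { int id; };                        // stand-in for mirror::Object
struct Entry { uint64_t registry_id; };           // stand-in for ObjectRegistryEntry
using RootVisitor = Object* (*)(Object* obj, void* arg);
using Registry = std::map<Object*, Entry>;

// Rebuild the map so moved objects are re-keyed by their new address and
// collected objects (visitor returns nullptr) are dropped.
void update_pointers(Registry& registry, RootVisitor visitor, void* arg) {
  Registry updated;
  for (auto& pair : registry) {
    Object* new_obj = visitor(pair.first, arg);
    if (new_obj != nullptr) {
      updated.emplace(new_obj, pair.second);
    }
  }
  registry = std::move(updated);
}

int main() {
  Object a{1}, a_moved{1}, b{2};
  Registry registry{{&a, Entry{100}}, {&b, Entry{200}}};

  // Toy visitor: object 1 "moved" to the address passed via arg,
  // object 2 was collected. A real visitor is supplied by the GC.
  RootVisitor visitor = [](Object* obj, void* arg) -> Object* {
    return obj->id == 1 ? static_cast<Object*>(arg) : nullptr;
  };
  update_pointers(registry, visitor, &a_moved);

  for (const auto& pair : registry) {
    std::cout << "entry " << pair.second.registry_id
              << " now keyed by object at " << pair.first << "\n";
  }
  return 0;
}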
-rw-r--r--  runtime/debugger.cc                        64
-rw-r--r--  runtime/debugger.h                          7
-rw-r--r--  runtime/gc/heap.cc                         84
-rw-r--r--  runtime/gc/heap.h                           2
-rw-r--r--  runtime/jdwp/object_registry.cc           132
-rw-r--r--  runtime/jdwp/object_registry.h             17
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc     9
-rw-r--r--  runtime/runtime.cc                          3
8 files changed, 211 insertions(+), 107 deletions(-)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 99e7867..8280c7c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -63,6 +63,9 @@ struct AllocRecordStackTraceElement {
mirror::ArtMethod* method;
uint32_t dex_pc;
+ AllocRecordStackTraceElement() : method(nullptr), dex_pc(0) {
+ }
+
int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return MethodHelper(method).GetLineNumFromDexPC(dex_pc);
}
@@ -81,6 +84,20 @@ struct AllocRecord {
}
return depth;
}
+
+ void UpdateObjectPointers(RootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (type != nullptr) {
+ type = down_cast<mirror::Class*>(visitor(type, arg));
+ }
+ for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
+ mirror::ArtMethod*& m = stack[stack_frame].method;
+ if (m == nullptr) {
+ break;
+ }
+ m = down_cast<mirror::ArtMethod*>(visitor(m, arg));
+ }
+ }
};
struct Breakpoint {
@@ -775,6 +792,8 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectI
JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>& counts)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ heap->CollectGarbage(false);
std::vector<mirror::Class*> classes;
counts.clear();
for (size_t i = 0; i < class_ids.size(); ++i) {
@@ -786,19 +805,20 @@ JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class
classes.push_back(c);
counts.push_back(0);
}
-
- Runtime::Current()->GetHeap()->CountInstances(classes, false, &counts[0]);
+ heap->CountInstances(classes, false, &counts[0]);
return JDWP::ERR_NONE;
}
JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ // We only want reachable instances, so do a GC.
+ heap->CollectGarbage(false);
JDWP::JdwpError status;
mirror::Class* c = DecodeClass(class_id, status);
- if (c == NULL) {
+ if (c == nullptr) {
return status;
}
-
std::vector<mirror::Object*> raw_instances;
Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
@@ -810,13 +830,14 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, s
JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>& referring_objects)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ heap->CollectGarbage(false);
mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
if (o == NULL || o == ObjectRegistry::kInvalidObject) {
return JDWP::ERR_INVALID_OBJECT;
}
-
std::vector<mirror::Object*> raw_instances;
- Runtime::Current()->GetHeap()->GetReferringObjects(o, max_count, raw_instances);
+ heap->GetReferringObjects(o, max_count, raw_instances);
for (size_t i = 0; i < raw_instances.size(); ++i) {
referring_objects.push_back(gRegistry->Add(raw_instances[i]));
}
@@ -3772,6 +3793,37 @@ void Dbg::DumpRecentAllocations() {
}
}
+void Dbg::UpdateObjectPointers(RootVisitor* visitor, void* arg) {
+ {
+ MutexLock mu(Thread::Current(), gAllocTrackerLock);
+ if (recent_allocation_records_ != nullptr) {
+ size_t i = HeadIndex();
+ size_t count = gAllocRecordCount;
+ while (count--) {
+ AllocRecord* record = &recent_allocation_records_[i];
+ DCHECK(record != nullptr);
+ record->UpdateObjectPointers(visitor, arg);
+ i = (i + 1) & (gAllocRecordMax - 1);
+ }
+ }
+ }
+ if (gRegistry != nullptr) {
+ gRegistry->UpdateObjectPointers(visitor, arg);
+ }
+}
+
+void Dbg::AllowNewObjectRegistryObjects() {
+ if (gRegistry != nullptr) {
+ gRegistry->AllowNewObjects();
+ }
+}
+
+void Dbg::DisallowNewObjectRegistryObjects() {
+ if (gRegistry != nullptr) {
+ gRegistry->DisallowNewObjects();
+ }
+}
+
class StringTable {
public:
StringTable() {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 328c9cd..f1e3f45 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -452,6 +452,10 @@ class Dbg {
static jbyteArray GetRecentAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void DumpRecentAllocations();
+ // Updates the stored direct object pointers (called from SweepSystemWeaks).
+ static void UpdateObjectPointers(RootVisitor* visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
enum HpifWhen {
HPIF_WHEN_NEVER = 0,
HPIF_WHEN_NOW = 1,
@@ -476,6 +480,9 @@ class Dbg {
static void DdmSendHeapSegments(bool native)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void AllowNewObjectRegistryObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void DisallowNewObjectRegistryObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f438ca0..62567d7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1061,18 +1061,18 @@ class InstanceCounter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {
}
-
- void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (size_t i = 0; i < classes_.size(); ++i) {
- mirror::Class* instance_class = o->GetClass();
- if (use_is_assignable_from_) {
- if (instance_class != NULL && classes_[i]->IsAssignableFrom(instance_class)) {
- ++counts_[i];
- }
- } else {
- if (instance_class == classes_[i]) {
- ++counts_[i];
+ static void Callback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
+ mirror::Class* instance_class = obj->GetClass();
+ CHECK(instance_class != nullptr);
+ for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
+ if (instance_counter->use_is_assignable_from_) {
+ if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) {
+ ++instance_counter->counts_[i];
}
+ } else if (instance_class == instance_counter->classes_[i]) {
+ ++instance_counter->counts_[i];
}
}
}
@@ -1081,22 +1081,18 @@ class InstanceCounter {
const std::vector<mirror::Class*>& classes_;
bool use_is_assignable_from_;
uint64_t* const counts_;
-
DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
};
void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
uint64_t* counts) {
- // We only want reachable instances, so do a GC. This also ensures that the alloc stack
- // is empty, so the live bitmap is the only place we need to look.
+ // Can't do any GC in this function since this may move classes.
Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kNative);
- CollectGarbage(false);
- self->TransitionFromSuspendedToRunnable();
-
+ auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
InstanceCounter counter(classes, use_is_assignable_from, counts);
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Visit(counter);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ VisitObjects(InstanceCounter::Callback, &counter);
+ self->EndAssertNoThreadSuspension(old_cause);
}
class InstanceCollector {
@@ -1105,12 +1101,15 @@ class InstanceCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: class_(c), max_count_(max_count), instances_(instances) {
}
-
- void operator()(mirror::Object* o) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* instance_class = o->GetClass();
- if (instance_class == class_) {
- if (max_count_ == 0 || instances_.size() < max_count_) {
- instances_.push_back(o);
+ static void Callback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ DCHECK(arg != nullptr);
+ InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
+ mirror::Class* instance_class = obj->GetClass();
+ if (instance_class == instance_collector->class_) {
+ if (instance_collector->max_count_ == 0 ||
+ instance_collector->instances_.size() < instance_collector->max_count_) {
+ instance_collector->instances_.push_back(obj);
}
}
}
@@ -1119,22 +1118,18 @@ class InstanceCollector {
mirror::Class* class_;
uint32_t max_count_;
std::vector<mirror::Object*>& instances_;
-
DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};
void Heap::GetInstances(mirror::Class* c, int32_t max_count,
std::vector<mirror::Object*>& instances) {
- // We only want reachable instances, so do a GC. This also ensures that the alloc stack
- // is empty, so the live bitmap is the only place we need to look.
+ // Can't do any GC in this function since this may move classes.
Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kNative);
- CollectGarbage(false);
- self->TransitionFromSuspendedToRunnable();
-
+ auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
InstanceCollector collector(c, max_count, instances);
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Visit(collector);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ VisitObjects(&InstanceCollector::Callback, &collector);
+ self->EndAssertNoThreadSuspension(old_cause);
}
class ReferringObjectsFinder {
@@ -1145,6 +1140,11 @@ class ReferringObjectsFinder {
: object_(object), max_count_(max_count), referring_objects_(referring_objects) {
}
+ static void Callback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
+ }
+
// For bitmap Visit.
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
@@ -1164,22 +1164,18 @@ class ReferringObjectsFinder {
mirror::Object* object_;
uint32_t max_count_;
std::vector<mirror::Object*>& referring_objects_;
-
DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};
void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
std::vector<mirror::Object*>& referring_objects) {
- // We only want reachable instances, so do a GC. This also ensures that the alloc stack
- // is empty, so the live bitmap is the only place we need to look.
+ // Can't do any GC in this function since this may move classes.
Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kNative);
- CollectGarbage(false);
- self->TransitionFromSuspendedToRunnable();
-
+ auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
ReferringObjectsFinder finder(o, max_count, referring_objects);
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- GetLiveBitmap()->Visit(finder);
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ VisitObjects(&ReferringObjectsFinder::Callback, &finder);
+ self->EndAssertNoThreadSuspension(old_cause);
}
void Heap::CollectGarbage(bool clear_soft_references) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f35ff4f..476ceee 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -249,7 +249,7 @@ class Heap {
void DecrementDisableMovingGC(Thread* self);
// Initiates an explicit garbage collection.
- void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);
+ void CollectGarbage(bool clear_soft_references);
// Does a concurrent GC, should only be called by the GC daemon thread
// through runtime.
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 369eddd..40ba3e3 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -31,7 +31,8 @@ std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs) {
}
ObjectRegistry::ObjectRegistry()
- : lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), next_id_(1) {
+ : lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), allow_new_objects_(true),
+ condition_("object registry condition", lock_), next_id_(1) {
}
JDWP::RefTypeId ObjectRegistry::AddRefType(mirror::Class* c) {
@@ -49,58 +50,59 @@ JDWP::ObjectId ObjectRegistry::InternalAdd(mirror::Object* o) {
ScopedObjectAccessUnchecked soa(Thread::Current());
MutexLock mu(soa.Self(), lock_);
- ObjectRegistryEntry dummy;
- dummy.jni_reference_type = JNIWeakGlobalRefType;
- dummy.jni_reference = NULL;
- dummy.reference_count = 0;
- dummy.id = 0;
- std::pair<object_iterator, bool> result = object_to_entry_.insert(std::make_pair(o, dummy));
- ObjectRegistryEntry& entry = result.first->second;
- if (!result.second) {
- // This object was already in our map.
- entry.reference_count += 1;
- return entry.id;
+ while (UNLIKELY(!allow_new_objects_)) {
+ condition_.WaitHoldingLocks(soa.Self());
}
+ ObjectRegistryEntry* entry;
+ auto it = object_to_entry_.find(o);
+ if (it != object_to_entry_.end()) {
+ // This object was already in our map.
+ entry = it->second;
+ ++entry->reference_count;
+ } else {
+ entry = new ObjectRegistryEntry;
+ entry->jni_reference_type = JNIWeakGlobalRefType;
+ entry->jni_reference = nullptr;
+ entry->reference_count = 0;
+ entry->id = 0;
+ object_to_entry_.insert(std::make_pair(o, entry));
- // This object isn't in the registry yet, so add it.
- JNIEnv* env = soa.Env();
-
- jobject local_reference = soa.AddLocalReference<jobject>(o);
+ // This object isn't in the registry yet, so add it.
+ JNIEnv* env = soa.Env();
- entry.jni_reference_type = JNIWeakGlobalRefType;
- entry.jni_reference = env->NewWeakGlobalRef(local_reference);
- entry.reference_count = 1;
- entry.id = next_id_++;
+ jobject local_reference = soa.AddLocalReference<jobject>(o);
- id_to_entry_.Put(entry.id, &entry);
+ entry->jni_reference_type = JNIWeakGlobalRefType;
+ entry->jni_reference = env->NewWeakGlobalRef(local_reference);
+ entry->reference_count = 1;
+ entry->id = next_id_++;
- env->DeleteLocalRef(local_reference);
+ id_to_entry_.Put(entry->id, entry);
- return entry.id;
+ env->DeleteLocalRef(local_reference);
+ }
+ return entry->id;
}
bool ObjectRegistry::Contains(mirror::Object* o) {
- Thread* self = Thread::Current();
- MutexLock mu(self, lock_);
- return (object_to_entry_.find(o) != object_to_entry_.end());
+ MutexLock mu(Thread::Current(), lock_);
+ return object_to_entry_.find(o) != object_to_entry_.end();
}
void ObjectRegistry::Clear() {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
VLOG(jdwp) << "Object registry contained " << object_to_entry_.size() << " entries";
-
// Delete all the JNI references.
JNIEnv* env = self->GetJniEnv();
- for (object_iterator it = object_to_entry_.begin(); it != object_to_entry_.end(); ++it) {
- ObjectRegistryEntry& entry = (it->second);
+ for (const auto& pair : object_to_entry_) {
+ const ObjectRegistryEntry& entry = *pair.second;
if (entry.jni_reference_type == JNIWeakGlobalRefType) {
env->DeleteWeakGlobalRef(entry.jni_reference);
} else {
env->DeleteGlobalRef(entry.jni_reference);
}
}
-
// Clear the maps.
object_to_entry_.clear();
id_to_entry_.clear();
@@ -109,11 +111,11 @@ void ObjectRegistry::Clear() {
mirror::Object* ObjectRegistry::InternalGet(JDWP::ObjectId id) {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
if (it == id_to_entry_.end()) {
return kInvalidObject;
}
- ObjectRegistryEntry& entry = *(it->second);
+ ObjectRegistryEntry& entry = *it->second;
return self->DecodeJObject(entry.jni_reference);
}
@@ -123,26 +125,26 @@ jobject ObjectRegistry::GetJObject(JDWP::ObjectId id) {
}
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
CHECK(it != id_to_entry_.end()) << id;
- ObjectRegistryEntry& entry = *(it->second);
+ ObjectRegistryEntry& entry = *it->second;
return entry.jni_reference;
}
void ObjectRegistry::DisableCollection(JDWP::ObjectId id) {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
CHECK(it != id_to_entry_.end());
- Promote(*(it->second));
+ Promote(*it->second);
}
void ObjectRegistry::EnableCollection(JDWP::ObjectId id) {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
CHECK(it != id_to_entry_.end());
- Demote(*(it->second));
+ Demote(*it->second);
}
void ObjectRegistry::Demote(ObjectRegistryEntry& entry) {
@@ -170,10 +172,9 @@ void ObjectRegistry::Promote(ObjectRegistryEntry& entry) {
bool ObjectRegistry::IsCollected(JDWP::ObjectId id) {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
CHECK(it != id_to_entry_.end());
-
- ObjectRegistryEntry& entry = *(it->second);
+ ObjectRegistryEntry& entry = *it->second;
if (entry.jni_reference_type == JNIWeakGlobalRefType) {
JNIEnv* env = self->GetJniEnv();
return env->IsSameObject(entry.jni_reference, NULL); // Has the jweak been collected?
@@ -185,24 +186,55 @@ bool ObjectRegistry::IsCollected(JDWP::ObjectId id) {
void ObjectRegistry::DisposeObject(JDWP::ObjectId id, uint32_t reference_count) {
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- id_iterator it = id_to_entry_.find(id);
+ auto it = id_to_entry_.find(id);
if (it == id_to_entry_.end()) {
return;
}
-
- ObjectRegistryEntry& entry = *(it->second);
- entry.reference_count -= reference_count;
- if (entry.reference_count <= 0) {
+ ObjectRegistryEntry* entry = it->second;
+ entry->reference_count -= reference_count;
+ if (entry->reference_count <= 0) {
JNIEnv* env = self->GetJniEnv();
- mirror::Object* object = self->DecodeJObject(entry.jni_reference);
- if (entry.jni_reference_type == JNIWeakGlobalRefType) {
- env->DeleteWeakGlobalRef(entry.jni_reference);
+ mirror::Object* object = self->DecodeJObject(entry->jni_reference);
+ if (entry->jni_reference_type == JNIWeakGlobalRefType) {
+ env->DeleteWeakGlobalRef(entry->jni_reference);
} else {
- env->DeleteGlobalRef(entry.jni_reference);
+ env->DeleteGlobalRef(entry->jni_reference);
}
object_to_entry_.erase(object);
id_to_entry_.erase(id);
+ delete entry;
}
}
+void ObjectRegistry::UpdateObjectPointers(RootVisitor visitor, void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ if (object_to_entry_.empty()) {
+ return;
+ }
+ std::map<mirror::Object*, ObjectRegistryEntry*> new_object_to_entry;
+ for (auto& pair : object_to_entry_) {
+ mirror::Object* new_obj;
+ if (pair.first != nullptr) {
+ new_obj = visitor(pair.first, arg);
+ if (new_obj != nullptr) {
+ new_object_to_entry.insert(std::make_pair(new_obj, pair.second));
+ }
+ }
+ }
+ object_to_entry_ = new_object_to_entry;
+}
+
+void ObjectRegistry::AllowNewObjects() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, lock_);
+ allow_new_objects_ = true;
+ condition_.Broadcast(self);
+}
+
+void ObjectRegistry::DisallowNewObjects() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, lock_);
+ allow_new_objects_ = false;
+}
+
} // namespace art
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 7f162ca..0190575 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -26,6 +26,7 @@
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "root_visitor.h"
#include "safe_map.h"
namespace art {
@@ -83,6 +84,15 @@ class ObjectRegistry {
// Avoid using this and use standard Get when possible.
jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Visit, objects are treated as system weaks.
+ void UpdateObjectPointers(RootVisitor visitor, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // We have allow / disallow functionality since we use system weak sweeping logic to update moved
+ // objects inside of the object_to_entry_ map.
+ void AllowNewObjects() LOCKS_EXCLUDED(lock_);
+ void DisallowNewObjects() LOCKS_EXCLUDED(lock_);
+
private:
JDWP::ObjectId InternalAdd(mirror::Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* InternalGet(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -90,11 +100,10 @@ class ObjectRegistry {
void Promote(ObjectRegistryEntry& entry) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, lock_);
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ bool allow_new_objects_ GUARDED_BY(lock_);
+ ConditionVariable condition_ GUARDED_BY(lock_);
- typedef std::map<mirror::Object*, ObjectRegistryEntry>::iterator object_iterator;
- std::map<mirror::Object*, ObjectRegistryEntry> object_to_entry_ GUARDED_BY(lock_);
-
- typedef SafeMap<JDWP::ObjectId, ObjectRegistryEntry*>::iterator id_iterator;
+ std::map<mirror::Object*, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
SafeMap<JDWP::ObjectId, ObjectRegistryEntry*> id_to_entry_ GUARDED_BY(lock_);
size_t next_id_ GUARDED_BY(lock_);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index dceea5c..d9baaaf 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -233,14 +233,19 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) {
static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass,
jboolean countAssignable) {
ScopedObjectAccess soa(env);
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ // We only want reachable instances, so do a GC. This also ensures that the alloc stack
+ // is empty, so the live bitmap is the only place we need to look. Need to do GC before decoding
+ // any jobjects.
+ heap->CollectGarbage(false);
mirror::Class* c = soa.Decode<mirror::Class*>(javaClass);
- if (c == NULL) {
+ if (c == nullptr) {
return 0;
}
std::vector<mirror::Class*> classes;
classes.push_back(c);
uint64_t count = 0;
- Runtime::Current()->GetHeap()->CountInstances(classes, countAssignable, &count);
+ heap->CountInstances(classes, countAssignable, &count);
return count;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 17afb43..09d05d1 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -379,6 +379,7 @@ void Runtime::SweepSystemWeaks(RootVisitor* visitor, void* arg) {
GetInternTable()->SweepInternTableWeaks(visitor, arg);
GetMonitorList()->SweepMonitorList(visitor, arg);
GetJavaVM()->SweepJniWeakGlobals(visitor, arg);
+ Dbg::UpdateObjectPointers(visitor, arg);
}
static gc::CollectorType ParseCollectorType(const std::string& option) {
@@ -1495,12 +1496,14 @@ void Runtime::DisallowNewSystemWeaks() {
monitor_list_->DisallowNewMonitors();
intern_table_->DisallowNewInterns();
java_vm_->DisallowNewWeakGlobals();
+ Dbg::DisallowNewObjectRegistryObjects();
}
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
intern_table_->AllowNewInterns();
java_vm_->AllowNewWeakGlobals();
+ Dbg::AllowNewObjectRegistryObjects();
}
void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
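
The runtime.cc hunk above wires the registry into the existing DisallowNewSystemWeaks() / AllowNewSystemWeaks() sequence, so new registrations block while the sweep rewrites the map (see the allow_new_objects_ / condition_ wait loop in InternalAdd). Below is a minimal standalone sketch of that gate using standard-library primitives instead of ART's Mutex/ConditionVariable; the class and method names are hypothetical.

// Sketch (not ART code) of an allow/disallow gate for a weak table:
// registrations wait while the sweep is rewriting the table.
#include <condition_variable>
#include <mutex>

class WeakTableGate {
 public:
  void DisallowNew() {
    std::lock_guard<std::mutex> lock(mutex_);
    allow_new_ = false;
  }
  void AllowNew() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      allow_new_ = true;
    }
    cond_.notify_all();  // wake threads blocked in WaitUntilAllowed()
  }
  // Called at the start of every registration; returns only once new
  // entries are allowed again, so registrations cannot race the sweep.
  void WaitUntilAllowed() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return allow_new_; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable cond_;
  bool allow_new_ = true;
};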