author    Hiroshi Yamauchi <yamauchi@google.com>  2014-04-04 02:54:16 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-04-04 02:54:16 +0000
commit    e9236f03d8c4348a8f1a8767baa1aea60ecdc1f5 (patch)
tree      ba50bc7c65e03ac9ea16a24375cef68c629a59d1 /runtime
parent    e5893f8fb70c58fe9950c1fc8b1023e32ca34637 (diff)
parent    4cd662e54440f76fc920cb2c67acab3bba8b33dd (diff)
Merge "Fix Object::Clone()'s pre-fence barrier."
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/class_linker.cc            5
-rw-r--r--  runtime/gc/allocator/rosalloc.cc   2
-rw-r--r--  runtime/gc/heap-inl.h             18
-rw-r--r--  runtime/gc/heap.h                 15
-rw-r--r--  runtime/mirror/class-inl.h        11
-rw-r--r--  runtime/mirror/object.cc          28
-rw-r--r--  runtime/sirt_ref-inl.h            13
-rw-r--r--  runtime/sirt_ref.h                16
8 files changed, 80 insertions, 28 deletions
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6c5406e..78b7cc0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -202,7 +202,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
- heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass))));
+ heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor())));
CHECK(java_lang_Class.get() != NULL);
mirror::Class::SetClassClass(java_lang_Class.get());
java_lang_Class->SetClass(java_lang_Class.get());
@@ -1180,7 +1180,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
SirtRef<mirror::DexCache> dex_cache(
self, down_cast<mirror::DexCache*>(
- heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize())));
+ heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize(),
+ VoidFunctor())));
if (dex_cache.get() == NULL) {
return NULL;
}
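Both call sites in this file now name their pre-fence visitor explicitly because the default template argument is removed from the Heap allocation entry points (see heap.h below). For orientation, here is a minimal sketch of a do-nothing visitor with the call shape the allocator uses (the (object, usable size) shape visible in CopyObjectVisitor further down); the name is illustrative, and ART's real VoidFunctor is defined elsewhere in the tree.

// Illustrative no-op pre-fence visitor (hypothetical name; ART's real
// VoidFunctor lives elsewhere). The allocator invokes it on the freshly
// allocated storage before the fence that publishes the object, so an
// empty body means "nothing must be initialized pre-publication".
struct NoOpPreFenceVisitor {
  void operator()(mirror::Object* /* obj */, size_t /* usable_size */) const {
    // Intentionally empty.
  }
};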
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f5f6f16..920741f 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1997,6 +1997,8 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
CHECK_LE(obj_size, kLargeSizeThreshold)
<< "A run slot contains a large object " << Dump();
CHECK_EQ(SizeToIndex(obj_size), idx)
+ << PrettyTypeOf(obj) << " "
+ << "obj_size=" << obj_size << ", idx=" << idx << " "
<< "A run slot contains an object with wrong size " << Dump();
}
}
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 25f20d6..a06f272 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -65,7 +65,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
bool after_is_current_allocator = allocator == GetCurrentAllocator();
if (is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
- return AllocObject<kInstrumented>(self, klass, byte_count);
+ return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
return nullptr;
}
@@ -111,7 +111,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
DCHECK(!Runtime::Current()->HasStatsEnabled());
}
if (AllocatorHasAllocationStack(allocator)) {
- PushOnAllocationStack(self, obj);
+ PushOnAllocationStack(self, &obj);
}
if (kInstrumented) {
if (Dbg::IsAllocTrackingEnabled()) {
@@ -135,28 +135,34 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;
-inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
+inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
if (kUseThreadLocalAllocationStack) {
- bool success = self->PushOnThreadLocalAllocationStack(obj);
+ bool success = self->PushOnThreadLocalAllocationStack(*obj);
if (UNLIKELY(!success)) {
// Slow path. Allocate a new thread-local allocation stack.
mirror::Object** start_address;
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
- success = self->PushOnThreadLocalAllocationStack(obj);
+ success = self->PushOnThreadLocalAllocationStack(*obj);
// Must succeed.
CHECK(success);
}
} else {
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
- while (!allocation_stack_->AtomicPushBack(obj)) {
+ while (!allocation_stack_->AtomicPushBack(*obj)) {
+ // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
+ SirtRefNoVerify<mirror::Object> ref(self, *obj);
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
+ *obj = ref.get();
}
}
}
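PushOnAllocationStack now receives mirror::Object** because CollectGarbageInternal may run a moving collection: the object is rooted in a SirtRefNoVerify for the duration of the GC and the (possibly relocated) address is read back into *obj afterwards. The same hold-across-GC pattern, compressed into a sketch with hypothetical helpers TryPush() and GcPointThatMayMoveObjects():

// Pattern sketch only (TryPush and GcPointThatMayMoveObjects are placeholders):
// keep a raw object pointer valid across a collection by rooting it in a
// SirtRef, then reload it, since a moving GC may have relocated the object.
mirror::Object* PushWithRetry(Thread* self, mirror::Object* obj) {
  while (!TryPush(obj)) {
    // Skip verification: obj is not on the allocation stack yet.
    SirtRefNoVerify<mirror::Object> ref(self, obj);
    GcPointThatMayMoveObjects();   // may relocate obj
    obj = ref.get();               // read back the updated address from the root
  }
  return obj;
}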
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5879757..ffb4e59 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -158,28 +158,28 @@ class Heap {
~Heap();
// Allocates and initializes storage for an object instance.
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
GetCurrentNonMovingAllocator(),
pre_fence_visitor);
}
- template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
+ template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
- const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
+ const PreFenceVisitor& pre_fence_visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
AllocatorType GetCurrentAllocator() const {
@@ -691,7 +691,8 @@ class Heap {
void SignalHeapTrimDaemon(Thread* self);
// Push an object onto the allocation stack.
- void PushOnAllocationStack(Thread* self, mirror::Object* obj);
+ void PushOnAllocationStack(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 89d9241..025e62a 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -442,7 +442,14 @@ inline void Class::SetName(String* name) {
}
inline void Class::CheckObjectAlloc() {
- DCHECK(!IsArrayClass()) << PrettyClass(this);
+ DCHECK(!IsArrayClass())
+ << PrettyClass(this)
+ << "A array shouldn't be allocated through this "
+ << "as it requires a pre-fence visitor that sets the class size.";
+ DCHECK(!IsClassClass())
+ << PrettyClass(this)
+ << "A class object shouldn't be allocated through this "
+ << "as it requires a pre-fence visitor that sets the class size.";
DCHECK(IsInstantiable()) << PrettyClass(this);
// TODO: decide whether we want this check. It currently fails during bootstrap.
// DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
@@ -454,7 +461,7 @@ inline Object* Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
CheckObjectAlloc();
gc::Heap* heap = Runtime::Current()->GetHeap();
return heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, this, this->object_size_,
- allocator_type);
+ allocator_type, VoidFunctor());
}
inline Object* Class::AllocObject(Thread* self) {
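The new DCHECK messages spell out the rule this change enforces: arrays and class objects must not go through Class::Alloc because their size-determining fields (an array's length, a class's object size) have to be written by a pre-fence visitor before the object is published, otherwise the GC can compute a wrong size for them (the situation the extra rosalloc diagnostics above help diagnose). A hedged sketch of the kind of visitor an array allocation needs; the class name is illustrative, and ART's real array path defines its own:

// Illustrative pre-fence visitor for an array allocation (hypothetical type).
// The length is stored before the publishing fence so that nothing computing
// the object's size can observe an uninitialized length field.
class SetArrayLengthVisitor {
 public:
  explicit SetArrayLengthVisitor(int32_t length) : length_(length) {}
  void operator()(mirror::Object* obj, size_t /* usable_size */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // down_cast rather than AsArray(): the object is not yet on the allocation
    // stack or in the live bitmap, so verification would be premature.
    down_cast<mirror::Array*>(obj)->SetLength(length_);
  }
 private:
  const int32_t length_;
  DISALLOW_COPY_AND_ASSIGN(SetArrayLengthVisitor);
};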
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index f1485e5..d9155f5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -66,6 +66,26 @@ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* sr
return dest;
}
+// An allocation pre-fence visitor that copies the object.
+class CopyObjectVisitor {
+ public:
+ explicit CopyObjectVisitor(Thread* self, SirtRef<Object>* orig, size_t num_bytes)
+ : self_(self), orig_(orig), num_bytes_(num_bytes) {
+ }
+
+ void operator()(Object* obj, size_t usable_size) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(usable_size);
+ CopyObject(self_, obj, orig_->get(), num_bytes_);
+ }
+
+ private:
+ Thread* const self_;
+ SirtRef<Object>* const orig_;
+ const size_t num_bytes_;
+ DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
+};
+
Object* Object::Clone(Thread* self) {
CHECK(!IsClass()) << "Can't clone classes.";
// Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
@@ -74,13 +94,11 @@ Object* Object::Clone(Thread* self) {
size_t num_bytes = SizeOf();
SirtRef<Object> this_object(self, this);
Object* copy;
+ CopyObjectVisitor visitor(self, &this_object, num_bytes);
if (heap->IsMovableObject(this)) {
- copy = heap->AllocObject<true>(self, GetClass(), num_bytes);
+ copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
} else {
- copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes);
- }
- if (LIKELY(copy != nullptr)) {
- return CopyObject(self, copy, this_object.get(), num_bytes);
+ copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
}
return copy;
}
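This hunk is the fix itself. Before the change, Clone() let AllocObject/AllocNonMovableObject publish the new object (class pointer set, fence issued) and only then copied the contents, so the GC could observe the clone while its size-determining fields still held garbage. Routing the copy through CopyObjectVisitor makes the allocator perform it before the publishing fence. For contrast, the pre-change flow reconstructed from the '-' lines above into a standalone sketch, under the old signatures that still defaulted the visitor (the function name is made up):

// Reconstruction of the old flow for contrast; do not use. The copy runs after
// AllocObject() has already published the object, leaving a window in which a
// concurrent or incremental GC can see 'copy' with stale field contents.
Object* CloneBeforeFix(Thread* self, Object* orig, size_t num_bytes) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  SirtRef<Object> this_object(self, orig);
  Object* copy = heap->AllocObject<true>(self, orig->GetClass(), num_bytes);  // fence happens here
  if (LIKELY(copy != nullptr)) {
    return CopyObject(self, copy, this_object.get(), num_bytes);  // too late: already visible
  }
  return copy;
}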
diff --git a/runtime/sirt_ref-inl.h b/runtime/sirt_ref-inl.h
index 7f2d847..7de624a 100644
--- a/runtime/sirt_ref-inl.h
+++ b/runtime/sirt_ref-inl.h
@@ -23,8 +23,11 @@
namespace art {
-template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object) : self_(self), sirt_(object) {
- VerifyObject(object);
+template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object, bool should_verify)
+ : self_(self), sirt_(object) {
+ if (should_verify) {
+ VerifyObject(object);
+ }
self_->PushSirt(&sirt_);
}
@@ -33,8 +36,10 @@ template<class T> inline SirtRef<T>::~SirtRef() {
DCHECK_EQ(top_sirt, &sirt_);
}
-template<class T> inline T* SirtRef<T>::reset(T* object) {
- VerifyObject(object);
+template<class T> inline T* SirtRef<T>::reset(T* object, bool should_verify) {
+ if (should_verify) {
+ VerifyObject(object);
+ }
T* old_ref = get();
sirt_.SetReference(0, object);
return old_ref;
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 2226e17..cf23891 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -28,7 +28,7 @@ namespace art {
template<class T>
class SirtRef {
public:
- SirtRef(Thread* self, T* object);
+ SirtRef(Thread* self, T* object, bool should_verify = true);
~SirtRef();
T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -42,7 +42,8 @@ class SirtRef {
}
// Returns the old reference.
- T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* reset(T* object = nullptr, bool should_verify = true)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -51,6 +52,17 @@ class SirtRef {
DISALLOW_COPY_AND_ASSIGN(SirtRef);
};
+// A version of SirtRef which disables the object verification.
+template<class T>
+class SirtRefNoVerify : public SirtRef<T> {
+ public:
+ SirtRefNoVerify(Thread* self, T* object) : SirtRef<T>(self, object, false) {}
+ // Returns the old reference.
+ T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return SirtRef<T>::reset(object, false);
+ }
+};
+
} // namespace art
#endif // ART_RUNTIME_SIRT_REF_H_
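SirtRefNoVerify exists for exactly the heap-inl.h case above: rooting an object that is not yet fully registered with the heap (so VerifyObject() would be premature) across a call that may trigger a collection. A small usage sketch, assuming only the declarations in this header plus ART's Thread and mirror::Object types (the function name is illustrative):

// Usage sketch: root an object that would fail verification (not yet on the
// allocation stack) across code that may allocate or collect.
mirror::Object* HoldAcrossGc(Thread* self, mirror::Object* obj) {
  SirtRefNoVerify<mirror::Object> ref(self, obj);  // constructor skips VerifyObject()
  // ... code here may allocate or trigger a (possibly moving) collection ...
  return ref.get();  // the SIRT entry is a GC root, so this is the updated address
}  // destructor pops the SIRT entry on return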