-rw-r--r--  compiler/image_writer.cc                            |  62
-rw-r--r--  dex2oat/dex2oat.cc                                  |   2
-rw-r--r--  runtime/class_linker.cc                             |   2
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h               |  50
-rw-r--r--  runtime/gc/collector/mark_sweep.cc                  |   4
-rw-r--r--  runtime/gc/collector/semi_space.cc                  |   6
-rw-r--r--  runtime/gc/heap-inl.h                               |   9
-rw-r--r--  runtime/gc/heap.cc                                  |  61
-rw-r--r--  runtime/gc/heap.h                                   |  36
-rw-r--r--  runtime/gc/space/malloc_space.cc                    |   3
-rw-r--r--  runtime/gc/space/rosalloc_space.h                   |   3
-rw-r--r--  runtime/gc/space/space_test.cc                      | 244
-rw-r--r--  runtime/interpreter/interpreter_common.cc           |   2
-rw-r--r--  runtime/mirror/array-inl.h                          |   7
-rw-r--r--  runtime/mirror/array.h                              |  11
-rw-r--r--  runtime/mirror/art_method-inl.h                     |   7
-rw-r--r--  runtime/mirror/art_method.cc                        |   5
-rw-r--r--  runtime/mirror/art_method.h                         |  40
-rw-r--r--  runtime/mirror/class-inl.h                          |  10
-rw-r--r--  runtime/mirror/class.h                              | 111
-rw-r--r--  runtime/mirror/object-inl.h                         | 258
-rw-r--r--  runtime/mirror/object.cc                            |   7
-rw-r--r--  runtime/mirror/object.h                             | 126
-rw-r--r--  runtime/mirror/object_array-inl.h                   |  28
-rw-r--r--  runtime/mirror/object_array.h                       |  13
-rw-r--r--  runtime/native/java_lang_String.cc                  |   1
-rw-r--r--  runtime/native/java_lang_Thread.cc                  |   1
-rw-r--r--  runtime/native/java_lang_reflect_Proxy.cc           |   1
-rw-r--r--  runtime/native/scoped_fast_native_object_access.h   |   5
-rw-r--r--  runtime/scoped_thread_state_change.h                |   5
-rw-r--r--  runtime/sirt_ref.h                                  |   5
-rw-r--r--  runtime/stack.cc                                    |   5
-rw-r--r--  runtime/stack.h                                     |  32
-rw-r--r--  runtime/thread-inl.h                                |   7
-rw-r--r--  runtime/thread.cc                                   |  17
-rw-r--r--  runtime/thread.h                                    |   5
-rw-r--r--  runtime/verify_object-inl.h                         |  61
-rw-r--r--  runtime/verify_object.h                             |  62
38 files changed, 772 insertions(+), 542 deletions(-)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 60beebb..58f66b9 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -581,14 +581,15 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
void ImageWriter::FixupObject(Object* orig, Object* copy) {
DCHECK(orig != NULL);
DCHECK(copy != NULL);
- copy->SetClass(down_cast<Class*>(GetImageAddress(orig->GetClass())));
+ copy->SetClass<kVerifyNone>(down_cast<Class*>(GetImageAddress(orig->GetClass())));
// TODO: special case init of pointers to malloc data (or removal of these pointers)
- if (orig->IsClass()) {
- FixupClass(orig->AsClass(), down_cast<Class*>(copy));
- } else if (orig->IsObjectArray()) {
- FixupObjectArray(orig->AsObjectArray<Object>(), down_cast<ObjectArray<Object>*>(copy));
- } else if (orig->IsArtMethod()) {
- FixupMethod(orig->AsArtMethod(), down_cast<ArtMethod*>(copy));
+ if (orig->IsClass<kVerifyNone>()) {
+ FixupClass(orig->AsClass<kVerifyNone>(), down_cast<Class*>(copy));
+ } else if (orig->IsObjectArray<kVerifyNone>()) {
+ FixupObjectArray(orig->AsObjectArray<Object, kVerifyNone>(),
+ down_cast<ObjectArray<Object>*>(copy));
+ } else if (orig->IsArtMethod<kVerifyNone>()) {
+ FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
} else {
FixupInstanceFields(orig, copy);
}
@@ -607,54 +608,54 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// The resolution method has a special trampoline to call.
if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
} else if (UNLIKELY(orig == Runtime::Current()->GetImtConflictMethod())) {
- copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_imt_conflict_trampoline_offset_));
- copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_));
- copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+ copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
+ copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
(const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
} else {
- copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+ copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
(const_cast<byte*>(GetOatAddress(interpreter_to_compiled_code_bridge_offset_))));
// Use original code if it exists. Otherwise, set the code pointer to the resolution
// trampoline.
const byte* quick_code = GetOatAddress(orig->GetQuickOatCodeOffset());
if (quick_code != nullptr) {
- copy->SetEntryPointFromQuickCompiledCode(quick_code);
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
} else {
- copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+ copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
}
const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
if (portable_code != nullptr) {
- copy->SetEntryPointFromPortableCompiledCode(portable_code);
+ copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
} else {
- copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+ copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
}
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
// Note this is not the code_ pointer, that is handled above.
- copy->SetNativeMethod(GetOatAddress(jni_dlsym_lookup_offset_));
+ copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
const byte* mapping_table = GetOatAddress(mapping_table_off);
- copy->SetMappingTable(mapping_table);
+ copy->SetMappingTable<kVerifyNone>(mapping_table);
uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
const byte* vmap_table = GetOatAddress(vmap_table_offset);
- copy->SetVmapTable(vmap_table);
+ copy->SetVmapTable<kVerifyNone>(vmap_table);
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
- copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+ copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
}
}
}
@@ -663,7 +664,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
void ImageWriter::FixupObjectArray(ObjectArray<Object>* orig, ObjectArray<Object>* copy) {
for (int32_t i = 0; i < orig->GetLength(); ++i) {
Object* element = orig->Get(i);
- copy->SetWithoutChecksAndWriteBarrier<false>(i, GetImageAddress(element));
+ copy->SetWithoutChecksAndWriteBarrier<false, true, kVerifyNone>(i, GetImageAddress(element));
}
}
@@ -690,10 +691,11 @@ void ImageWriter::FixupFields(Object* orig,
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset byte_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- Object* ref = orig->GetFieldObject<Object>(byte_offset, false);
+ Object* ref = orig->GetFieldObject<Object, kVerifyNone>(byte_offset, false);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
- copy->SetFieldObjectWithoutWriteBarrier<false>(byte_offset, GetImageAddress(ref), false);
+ copy->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ byte_offset, GetImageAddress(ref), false);
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
} else {
@@ -712,10 +714,11 @@ void ImageWriter::FixupFields(Object* orig,
? klass->GetStaticField(i)
: klass->GetInstanceField(i));
MemberOffset field_offset = field->GetOffset();
- Object* ref = orig->GetFieldObject<Object>(field_offset, false);
+ Object* ref = orig->GetFieldObject<Object, kVerifyNone>(field_offset, false);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
- copy->SetFieldObjectWithoutWriteBarrier<false>(field_offset, GetImageAddress(ref), false);
+ copy->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ field_offset, GetImageAddress(ref), false);
}
}
}
@@ -726,7 +729,8 @@ void ImageWriter::FixupFields(Object* orig,
Object* ref = orig->GetFieldObject<Object>(field_offset, false);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
// image.
- copy->SetFieldObjectWithoutWriteBarrier<false>(field_offset, GetImageAddress(ref), false);
+ copy->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
+ field_offset, GetImageAddress(ref), false);
}
}
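All of the kVerifyNone arguments above opt the image writer out of the new templated verification on the mirror accessors. It has to: the copies live in the image buffer and their references have already been remapped to final image addresses, so verifying them against the current process heap would fail. A minimal, self-contained sketch of the accessor pattern these call sites assume (the flag values and the accessor body are assumptions; only the names kVerifyNone, kVerifyThis, kVerifyReads, and kDefaultVerifyFlags appear in this change):

    #include <cstddef>
    #include <cstdint>

    enum VerifyObjectFlags {
      kVerifyNone  = 0x0,  // Skip all checks (image writer, dead objects).
      kVerifyThis  = 0x1,  // Sanity-check the receiver before the access.
      kVerifyReads = 0x2,  // Sanity-check references read out of fields.
      kVerifyAll   = kVerifyThis | kVerifyReads,
    };
    static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyAll;

    inline void VerifyObject(const void* /*ref*/) { /* stub for this sketch */ }

    class Object {
     public:
      template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
      Object* GetFieldObject(size_t byte_offset) {
        if (kVerifyFlags & kVerifyThis) {
          VerifyObject(this);  // Folds away when the caller passes kVerifyNone.
        }
        Object* ref = *reinterpret_cast<Object**>(
            reinterpret_cast<uint8_t*>(this) + byte_offset);
        if (kVerifyFlags & kVerifyReads) {
          VerifyObject(ref);
        }
        return ref;
      }
    };

Because the flags are template parameters rather than function arguments, a kVerifyNone caller pays nothing at runtime, which is why every accessor touched in this change grows a template parameter instead of a bool.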
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index fd26cf6..bfda17d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -579,7 +579,7 @@ class WatchDog {
CHECK_GT(kWatchDogTimeoutSeconds, kWatchDogWarningSeconds);
// TODO: tune the multiplier for GC verification, the following is just to make the timeout
// large.
- int64_t multiplier = gc::kDesiredHeapVerification > gc::kVerifyAllFast ? 100 : 1;
+ int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
timespec warning_ts;
InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogWarningSeconds * 1000, 0, &warning_ts);
timespec timeout_ts;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fa50324..48ec5ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2308,7 +2308,7 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
CHECK(klass == existing);
}
}
- Runtime::Current()->GetHeap()->VerifyObject(klass);
+ VerifyObject(klass);
class_table_.insert(std::make_pair(hash, klass));
class_table_dirty_ = true;
return NULL;
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index d148ae5..4915532 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -30,19 +30,19 @@ namespace collector {
template <typename MarkVisitor>
inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor) {
- DCHECK(obj != NULL);
if (kIsDebugBuild && !IsMarked(obj)) {
heap_->DumpSpaces();
LOG(FATAL) << "Scanning unmarked object " << obj;
}
+ // GetClass() verifies the object; no need to re-verify it afterwards.
mirror::Class* klass = obj->GetClass();
- DCHECK(klass != NULL);
+ // IsArrayClass verifies klass.
if (UNLIKELY(klass->IsArrayClass())) {
if (kCountScannedTypes) {
++array_count_;
}
- if (klass->IsObjectArrayClass()) {
- VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+ if (klass->IsObjectArrayClass<kVerifyNone>()) {
+ VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
}
} else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
if (kCountScannedTypes) {
@@ -54,7 +54,7 @@ inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& v
++other_count_;
}
VisitOtherReferences(klass, obj, visitor);
- if (UNLIKELY(klass->IsReferenceClass())) {
+ if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
DelayReferenceReferent(klass, obj);
}
}
@@ -65,24 +65,19 @@ inline void MarkSweep::VisitObjectReferences(mirror::Object* obj, const Visitor&
bool visit_class)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- DCHECK(obj->GetClass() != NULL);
mirror::Class* klass = obj->GetClass();
- DCHECK(klass != NULL);
- if (klass == mirror::Class::GetJavaLangClass()) {
- DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
+ if (klass->IsArrayClass()) {
+ if (visit_class) {
+ visitor(obj, klass, mirror::Object::ClassOffset(), false);
+ }
+ if (klass->IsObjectArrayClass<kVerifyNone>()) {
+ VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object, kVerifyNone>(), visitor);
+ }
+ } else if (klass == mirror::Class::GetJavaLangClass()) {
+ DCHECK_EQ(klass->GetClass<kVerifyNone>(), mirror::Class::GetJavaLangClass());
VisitClassReferences(klass, obj, visitor);
} else {
- if (klass->IsArrayClass()) {
- if (visit_class) {
- visitor(obj, klass, mirror::Object::ClassOffset(), false);
- }
- if (klass->IsObjectArrayClass()) {
- VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
- }
- } else {
- VisitOtherReferences(klass, obj, visitor);
- }
+ VisitOtherReferences(klass, obj, visitor);
}
}
@@ -90,9 +85,7 @@ template <typename Visitor>
inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(obj != NULL);
- DCHECK(klass != NULL);
- VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
+ VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets<kVerifyNone>(), false, visitor);
}
template <typename Visitor>
@@ -100,14 +93,13 @@ inline void MarkSweep::VisitClassReferences(mirror::Class* klass, mirror::Object
const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
VisitInstanceFieldsReferences(klass, obj, visitor);
- VisitStaticFieldsReferences(obj->AsClass(), visitor);
+ VisitStaticFieldsReferences(obj->AsClass<kVerifyNone>(), visitor);
}
template <typename Visitor>
inline void MarkSweep::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(klass != NULL);
- VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
+ VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets<kVerifyNone>(), true, visitor);
}
template <typename Visitor>
@@ -118,7 +110,7 @@ inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_o
while (ref_offsets != 0) {
size_t right_shift = CLZ(ref_offsets);
MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
}
@@ -127,7 +119,7 @@ inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_o
// walk up the class inheritance hierarchy and find reference
// offsets the hard way. In the static case, just consider this
// class.
- for (mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+ for (mirror::Class* klass = is_static ? obj->AsClass<kVerifyNone>() : obj->GetClass<kVerifyNone>();
klass != nullptr;
klass = is_static ? nullptr : klass->GetSuperClass()) {
size_t num_reference_fields = (is_static
@@ -137,7 +129,7 @@ inline void MarkSweep::VisitFieldsReferences(mirror::Object* obj, uint32_t ref_o
mirror::ArtField* field = (is_static ? klass->GetStaticField(i)
: klass->GetInstanceField(i));
MemberOffset field_offset = field->GetOffset();
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(field_offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyReads>(field_offset, false);
visitor(obj, ref, field_offset, is_static);
}
}
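VisitFieldsReferences above has two paths: a fast one that walks a 32-bit bitmap of reference-field offsets using CLZ, and a slow reflective walk for classes whose reference fields don't fit in the bitmap. A self-contained sketch of the bitmap walk; the mapping from bit position to byte offset is an assumption standing in for ART's CLASS_OFFSET_FROM_CLZ:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kFirstReferenceOffset = 8;  // Assumed first field offset.
    constexpr uint32_t kReferenceSize = 4;         // 32-bit heap references.

    template <typename Visitor>
    void WalkReferenceOffsets(uint32_t ref_offsets, const Visitor& visit) {
      while (ref_offsets != 0) {
        // The highest set bit encodes the lowest remaining reference field.
        uint32_t right_shift = __builtin_clz(ref_offsets);
        visit(kFirstReferenceOffset + right_shift * kReferenceSize);
        ref_offsets &= ~(0x80000000u >> right_shift);  // Clear the handled bit.
      }
    }

    int main() {
      // Bits 31 and 29 set: reference fields in the first and third slots.
      WalkReferenceOffsets(0xA0000000u, [](uint32_t offset) {
        std::printf("reference field at offset %u\n", offset);  // 8, then 16.
      });
    }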
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7b9d675..fb797e0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -533,15 +533,11 @@ void MarkSweep::MarkRoot(const Object* obj) {
void MarkSweep::MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNullParallel(*root);
}
void MarkSweep::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
- DCHECK(root != nullptr);
- DCHECK(arg != nullptr);
reinterpret_cast<MarkSweep*>(arg)->MarkObjectNonNull(*root);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 882867b..fe8c253 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -666,11 +666,11 @@ void SemiSpace::ScanObject(Object* obj) {
// case since it does not dirty cards and use additional memory.
// Since we do not change the actual object, we can safely use non-transactional mode. Also
// disable check as we could run inside a transaction.
- obj->SetFieldObjectWithoutWriteBarrier<false, false>(offset, new_address, false);
+ obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
}
}, kMovingClasses);
- mirror::Class* klass = obj->GetClass();
- if (UNLIKELY(klass->IsReferenceClass())) {
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
DelayReferenceReferent(klass, obj);
}
}
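SetFieldObjectWithoutWriteBarrier matters in this loop because the ordinary setter ends by dirtying the card that covers the holder, queueing it for a rescan; semi-space is only installing forwarding addresses here, so that extra traffic buys nothing. For context, a sketch of the barrier being skipped; the card size and dirty value below are ART's customary constants, stated here as assumptions:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kCardShift = 7;      // 128-byte cards (assumed).
    constexpr uint8_t kCardDirty = 0x70;  // Marker the concurrent GC scans for (assumed).

    struct CardTable {
      uint8_t* biased_begin;  // Biased so (addr >> kCardShift) indexes directly.

      // The write barrier: after storing a reference into *holder, mark its
      // card so a concurrent or generational pass re-examines that object.
      void MarkCard(const void* holder) {
        biased_begin[reinterpret_cast<uintptr_t>(holder) >> kCardShift] = kCardDirty;
      }
    };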
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9c91b0e..3d591f0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -28,6 +28,7 @@
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"
+#include "verify_object-inl.h"
namespace art {
namespace gc {
@@ -98,12 +99,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
CheckConcurrentGC(self, new_num_bytes_allocated, obj);
}
- if (kIsDebugBuild) {
- if (kDesiredHeapVerification > kNoHeapVerification) {
- VerifyObject(obj);
- }
- self->VerifyStack();
- }
+ VerifyObject(obj);
+ self->VerifyStack();
return obj;
}
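The old call site wrapped VerifyObject in kIsDebugBuild and kDesiredHeapVerification tests; the new free function (runtime/verify_object-inl.h in the diffstat) makes that the callee's problem, so hot paths like allocation stay unconditional in source yet still compile to nothing when verification is off. A plausible shape, with the actual checks stubbed out as declarations (the third mode name and the bodies are assumptions):

    namespace mirror { class Object; }

    enum VerifyObjectMode {
      kVerifyObjectModeDisabled,
      kVerifyObjectModeFast,  // Cheap checks: alignment, class-of-class.
      kVerifyObjectModeAll,   // Assumed name for the thorough mode.
    };
    static constexpr VerifyObjectMode kVerifyObjectSupport = kVerifyObjectModeDisabled;

    bool FastSanityChecksPass(mirror::Object* obj);  // Alignment + class checks.
    void VerifyObjectBodySlow(mirror::Object* obj);  // Heap-assisted diagnosis.

    inline void VerifyObject(mirror::Object* obj) {
      if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
        if (kVerifyObjectSupport > kVerifyObjectModeFast ||
            !FastSanityChecksPass(obj)) {
          VerifyObjectBodySlow(obj);  // Reached only on suspicion or in "all" mode.
        }
      }
    }

Since kVerifyObjectSupport is a compile-time constant, the whole body folds away in release builds, recovering what the deleted kIsDebugBuild guard used to provide.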
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8c89cdc..b970df3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -135,7 +135,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
* searching.
*/
max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
- : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+ : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
current_allocator_(kAllocatorTypeDlMalloc),
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
@@ -150,7 +150,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
target_utilization_(target_utilization),
total_wait_time_(0),
total_allocation_time_(0),
- verify_object_mode_(kHeapVerificationNotPermitted),
+ verify_object_mode_(kVerifyObjectModeDisabled),
disable_moving_gc_count_(0),
running_on_valgrind_(RUNNING_ON_VALGRIND),
use_tlab_(use_tlab) {
@@ -314,9 +314,7 @@ void Heap::ChangeAllocator(AllocatorType allocator) {
bool Heap::IsCompilingBoot() const {
for (const auto& space : continuous_spaces_) {
- if (space->IsImageSpace()) {
- return false;
- } else if (space->IsZygoteSpace()) {
+ if (space->IsImageSpace() || space->IsZygoteSpace()) {
return false;
}
}
@@ -823,14 +821,16 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
return false;
}
if (bump_pointer_space_ != nullptr && bump_pointer_space_->HasAddress(obj)) {
- mirror::Class* klass = obj->GetClass();
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
if (obj == klass) {
// This case happens for java.lang.Class.
return true;
}
return VerifyClassClass(klass) && IsLiveObjectLocked(klass);
} else if (temp_space_ != nullptr && temp_space_->HasAddress(obj)) {
- return false;
+ // If we are in the allocated region of the temp space, then we are probably live (e.g. during
+ // a GC). When a GC isn't running, End() - Begin() is 0, which means no objects are contained.
+ return temp_space_->Contains(obj);
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
space::DiscontinuousSpace* d_space = NULL;
@@ -886,25 +886,6 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
return false;
}
-void Heap::VerifyObjectImpl(mirror::Object* obj) {
- if (Thread::Current() == NULL ||
- Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
- return;
- }
- VerifyObjectBody(obj);
-}
-
-bool Heap::VerifyClassClass(const mirror::Class* c) const {
- // Note: we don't use the accessors here as they have internal sanity checks that we don't want
- // to run
- const byte* raw_addr =
- reinterpret_cast<const byte*>(c) + mirror::Object::ClassOffset().Int32Value();
- mirror::Class* c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
- raw_addr = reinterpret_cast<const byte*>(c_c) + mirror::Object::ClassOffset().Int32Value();
- mirror::Class* c_c_c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
- return c_c == c_c_c;
-}
-
void Heap::DumpSpaces(std::ostream& stream) {
for (const auto& space : continuous_spaces_) {
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -923,36 +904,30 @@ void Heap::DumpSpaces(std::ostream& stream) {
}
void Heap::VerifyObjectBody(mirror::Object* obj) {
- CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+ if (this == nullptr && verify_object_mode_ == kVerifyObjectModeDisabled) {
+ return;
+ }
// Ignore early dawn of the universe verifications.
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
return;
}
- const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
- mirror::Object::ClassOffset().Int32Value();
- mirror::Class* c = reinterpret_cast<mirror::HeapReference<mirror::Class> const *>(raw_addr)->AsMirrorPtr();
- if (UNLIKELY(c == NULL)) {
- LOG(FATAL) << "Null class in object: " << obj;
- } else if (UNLIKELY(!IsAligned<kObjectAlignment>(c))) {
- LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
- }
+ CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
+ mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(
+ mirror::Object::ClassOffset(), false);
+ CHECK(c != nullptr) << "Null class in object " << obj;
+ CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
CHECK(VerifyClassClass(c));
- if (verify_object_mode_ > kVerifyAllFast) {
- // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
- // heap_bitmap_lock_.
+ if (verify_object_mode_ > kVerifyObjectModeFast) {
+ // Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
if (!IsLiveObjectLocked(obj)) {
DumpSpaces();
LOG(FATAL) << "Object is dead: " << obj;
}
- if (!IsLiveObjectLocked(c)) {
- LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
- }
}
}
void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
- DCHECK(obj != NULL);
reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}
@@ -1790,7 +1765,7 @@ class VerifyReferenceVisitor {
if (bitmap == nullptr) {
LOG(ERROR) << "Object " << obj << " has no bitmap";
- if (!heap_->VerifyClassClass(obj->GetClass())) {
+ if (!VerifyClassClass(obj->GetClass())) {
LOG(ERROR) << "Object " << obj << " failed class verification!";
}
} else {
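The block deleted above is the raw-pointer VerifyClassClass, which moves out of Heap and becomes a free function (note the heap_->VerifyClassClass call in the verifier visitor becoming plain VerifyClassClass). Its invariant: every class object's class is java.lang.Class, and java.lang.Class is its own class, so class-of-class must equal class-of-class-of-class. In a self-contained toy:

    struct Obj {
      Obj* klass;  // First word of every object: its class.
    };

    bool VerifyClassClass(const Obj* c) {
      if (c == nullptr) return false;
      const Obj* c_c = c->klass;      // Expected: the java.lang.Class object.
      const Obj* c_c_c = c_c->klass;  // java.lang.Class's class: itself.
      return c_c == c_c_c;
    }

    int main() {
      Obj java_lang_class{&java_lang_class};  // Self-referential root class.
      Obj string_class{&java_lang_class};
      return VerifyClassClass(&string_class) ? 0 : 1;  // 0: invariant holds.
    }

The raw field reads in the deleted code (rather than GetClass()) were deliberate: the accessors' own sanity checks must not run while performing the sanity check itself.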
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 21a2365..83202a5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -37,6 +37,7 @@
#include "reference_queue.h"
#include "safe_map.h"
#include "thread_pool.h"
+#include "verify_object.h"
namespace art {
@@ -99,15 +100,6 @@ enum AllocatorType {
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
-// How we want to sanity check the heap's correctness.
-enum HeapVerificationMode {
- kHeapVerificationNotPermitted, // Too early in runtime start-up for heap to be verified.
- kNoHeapVerification, // Production default.
- kVerifyAllFast, // Sanity check all heap accesses with quick(er) tests.
- kVerifyAll // Sanity check all heap accesses.
-};
-static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
-
// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
static constexpr bool kUseRosAlloc = true;
@@ -208,14 +200,9 @@ class Heap {
void ChangeCollector(CollectorType collector_type);
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
- void VerifyObjectImpl(mirror::Object* o);
- void VerifyObject(mirror::Object* o) {
- if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
- VerifyObjectImpl(o);
- }
- }
- // Check that c.getClass() == c.getClass().getClass().
- bool VerifyClassClass(const mirror::Class* c) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
+ // proper lock ordering for it.
+ void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;
// Check sanity of all live references.
void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
@@ -347,21 +334,20 @@ class Heap {
// Enable verification of object references when the runtime is sufficiently initialized.
void EnableObjectValidation() {
- verify_object_mode_ = kDesiredHeapVerification;
- if (verify_object_mode_ > kNoHeapVerification) {
+ verify_object_mode_ = kVerifyObjectSupport;
+ if (verify_object_mode_ > kVerifyObjectModeDisabled) {
VerifyHeap();
}
}
// Disable object reference verification for image writing.
void DisableObjectValidation() {
- verify_object_mode_ = kHeapVerificationNotPermitted;
+ verify_object_mode_ = kVerifyObjectModeDisabled;
}
// Other checks may be performed if we know the heap should be in a sane state.
bool IsObjectValidationEnabled() const {
- return kDesiredHeapVerification > kNoHeapVerification &&
- verify_object_mode_ > kHeapVerificationNotPermitted;
+ return verify_object_mode_ > kVerifyObjectModeDisabled;
}
// Returns true if low memory mode is enabled.
@@ -665,10 +651,6 @@ class Heap {
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- // No thread saftey analysis since we call this everywhere and it is impossible to find a proper
- // lock ordering for it.
- void VerifyObjectBody(mirror::Object *obj) NO_THREAD_SAFETY_ANALYSIS;
-
static void VerificationCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_);
@@ -916,7 +898,7 @@ class Heap {
AtomicInteger total_allocation_time_;
// The current state of heap verification, may be enabled or disabled.
- HeapVerificationMode verify_object_mode_;
+ VerifyObjectMode verify_object_mode_;
// Compacting GC disable count, prevents compacting GC from running iff > 0.
size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index f90e6c7..ee31112 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -111,7 +111,8 @@ mirror::Class* MallocSpace::FindRecentFreedObject(const mirror::Object* obj) {
}
void MallocSpace::RegisterRecentFree(mirror::Object* ptr) {
- recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
+ // No verification since the object is dead.
+ recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass<kVerifyNone>());
recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
}
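RegisterRecentFree keeps a short history of (object, class) pairs so FindRecentFreedObject can name the type of a recently freed object when diagnosing a dangling reference. The buffer is a power-of-two ring indexed with a mask, so the write position wraps without a branch; a minimal sketch (the size is an assumption):

    #include <cstddef>
    #include <utility>

    constexpr size_t kRecentFreeCount = 64;  // Assumed; must be a power of two.
    constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;

    struct RecentFreeLog {
      std::pair<void*, void*> entries[kRecentFreeCount] = {};  // (object, class)
      size_t pos = 0;

      void Record(void* ptr, void* klass) {
        entries[pos] = std::make_pair(ptr, klass);
        pos = (pos + 1) & kRecentFreeMask;  // Branch-free wraparound to slot 0.
      }
    };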
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 2377423..72e84f6 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -60,7 +60,8 @@ class RosAllocSpace : public MallocSpace {
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
void* obj_ptr = const_cast<void*>(reinterpret_cast<const void*>(obj));
// obj is a valid object. Use its class in the header to get the size.
- size_t size = obj->SizeOf();
+ // Don't use verification since the object may be dead if we are sweeping.
+ size_t size = obj->SizeOf<kVerifyNone>();
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
size_t size_by_ptr = rosalloc_->UsableSize(obj_ptr);
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 6d07a60..0b9f7ad 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -38,18 +38,20 @@ class SpaceTest : public CommonTest {
Runtime::Current()->GetHeap()->RevokeAllThreadLocalBuffers();
Runtime::Current()->GetHeap()->AddSpace(space);
}
- void InstallClass(mirror::Object* o, size_t size) NO_THREAD_SAFETY_ANALYSIS {
+ void InstallClass(SirtRef<mirror::Object>& o, size_t size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Note the minimum size, which is the size of a zero-length byte array.
EXPECT_GE(size, SizeOfZeroLengthByteArray());
- SirtRef<mirror::ClassLoader> null_loader(Thread::Current(), NULL);
- mirror::Class* byte_array_class = Runtime::Current()->GetClassLinker()->FindClass("[B", null_loader);
- EXPECT_TRUE(byte_array_class != NULL);
+ SirtRef<mirror::ClassLoader> null_loader(Thread::Current(), nullptr);
+ mirror::Class* byte_array_class = Runtime::Current()->GetClassLinker()->FindClass("[B",
+ null_loader);
+ EXPECT_TRUE(byte_array_class != nullptr);
o->SetClass(byte_array_class);
- mirror::Array* arr = o->AsArray();
+ mirror::Array* arr = o->AsArray<kVerifyNone>();
size_t header_size = SizeOfZeroLengthByteArray();
int32_t length = size - header_size;
arr->SetLength(length);
- EXPECT_EQ(arr->SizeOf(), size);
+ EXPECT_EQ(arr->SizeOf<kVerifyNone>(), size);
}
static size_t SizeOfZeroLengthByteArray() {
@@ -86,38 +88,38 @@ static size_t test_rand(size_t* seed) {
void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
{
// Init < max == growth
- UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, NULL));
- EXPECT_TRUE(space.get() != NULL);
+ UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+ EXPECT_TRUE(space.get() != nullptr);
}
{
// Init == max == growth
- UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, NULL));
- EXPECT_TRUE(space.get() != NULL);
+ UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+ EXPECT_TRUE(space.get() != nullptr);
}
{
// Init > max == growth
- UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, NULL));
- EXPECT_TRUE(space.get() == NULL);
+ UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+ EXPECT_TRUE(space.get() == nullptr);
}
{
// Growth == init < max
- UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, NULL));
- EXPECT_TRUE(space.get() != NULL);
+ UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+ EXPECT_TRUE(space.get() != nullptr);
}
{
// Growth < init < max
- UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, NULL));
- EXPECT_TRUE(space.get() == NULL);
+ UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+ EXPECT_TRUE(space.get() == nullptr);
}
{
// Init < growth < max
- UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, NULL));
- EXPECT_TRUE(space.get() != NULL);
+ UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+ EXPECT_TRUE(space.get() != nullptr);
}
{
// Init < max < growth
- UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, NULL));
- EXPECT_TRUE(space.get() == NULL);
+ UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+ EXPECT_TRUE(space.get() == nullptr);
}
}
@@ -134,52 +136,52 @@ TEST_F(SpaceTest, Init_RosAllocSpace) {
// the GC works with the ZygoteSpace.
void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
size_t dummy = 0;
- MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
- ASSERT_TRUE(space != NULL);
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
AddSpace(space);
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
// Succeeds, fits without adjusting the footprint limit.
- mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
- EXPECT_TRUE(ptr1 != NULL);
+ SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+ EXPECT_TRUE(ptr1.get() != nullptr);
InstallClass(ptr1, 1 * MB);
// Fails, requires a higher footprint limit.
mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr2 == NULL);
+ EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated;
- mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
- EXPECT_TRUE(ptr3 != NULL);
+ SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+ EXPECT_TRUE(ptr3.get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
InstallClass(ptr3, 8 * MB);
// Fails, requires a higher footprint limit.
mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr4 == NULL);
+ EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr5 == NULL);
+ EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- ScopedObjectAccess soa(self);
- size_t free3 = space->AllocationSize(ptr3);
+ size_t free3 = space->AllocationSize(ptr3.get());
EXPECT_EQ(free3, ptr3_bytes_allocated);
- EXPECT_EQ(free3, space->Free(self, ptr3));
+ EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
- EXPECT_TRUE(ptr6 != NULL);
+ SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+ EXPECT_TRUE(ptr6.get() != nullptr);
InstallClass(ptr6, 9 * MB);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1);
- space->Free(self, ptr1);
+ size_t free1 = space->AllocationSize(ptr1.get());
+ space->Free(self, ptr1.reset(nullptr));
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
@@ -199,23 +201,23 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- ptr1 = space->Alloc(self, 1 * MB, &dummy);
- EXPECT_TRUE(ptr1 != NULL);
+ ptr1.reset(space->Alloc(self, 1 * MB, &dummy));
+ EXPECT_TRUE(ptr1.get() != nullptr);
InstallClass(ptr1, 1 * MB);
// Fails, requires a higher footprint limit.
ptr2 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr2 == NULL);
+ EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3 = space->AllocWithGrowth(self, 2 * MB, &dummy);
- EXPECT_TRUE(ptr3 != NULL);
+ ptr3.reset(space->AllocWithGrowth(self, 2 * MB, &dummy));
+ EXPECT_TRUE(ptr3.get() != nullptr);
InstallClass(ptr3, 2 * MB);
- space->Free(self, ptr3);
+ space->Free(self, ptr3.reset(nullptr));
// Final clean up.
- free1 = space->AllocationSize(ptr1);
- space->Free(self, ptr1);
+ free1 = space->AllocationSize(ptr1.get());
+ space->Free(self, ptr1.reset(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -229,52 +231,52 @@ TEST_F(SpaceTest, ZygoteSpace_RosAllocSpace) {
void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
size_t dummy = 0;
- MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
- ASSERT_TRUE(space != NULL);
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ ASSERT_TRUE(space != nullptr);
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
// Make space findable to the heap, will also delete space when runtime is cleaned up
AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
- EXPECT_TRUE(ptr1 != NULL);
+ SirtRef<mirror::Object> ptr1(self, space->Alloc(self, 1 * MB, &dummy));
+ EXPECT_TRUE(ptr1.get() != nullptr);
InstallClass(ptr1, 1 * MB);
// Fails, requires a higher footprint limit.
mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr2 == NULL);
+ EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated;
- mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
- EXPECT_TRUE(ptr3 != NULL);
+ SirtRef<mirror::Object> ptr3(self, space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated));
+ EXPECT_TRUE(ptr3.get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
InstallClass(ptr3, 8 * MB);
// Fails, requires a higher footprint limit.
mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr4 == NULL);
+ EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
- EXPECT_TRUE(ptr5 == NULL);
+ EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- ScopedObjectAccess soa(self);
- size_t free3 = space->AllocationSize(ptr3);
+ size_t free3 = space->AllocationSize(ptr3.get());
EXPECT_EQ(free3, ptr3_bytes_allocated);
- space->Free(self, ptr3);
+ space->Free(self, ptr3.reset(nullptr));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- mirror::Object* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
- EXPECT_TRUE(ptr6 != NULL);
+ SirtRef<mirror::Object> ptr6(self, space->AllocWithGrowth(self, 9 * MB, &dummy));
+ EXPECT_TRUE(ptr6.get() != nullptr);
InstallClass(ptr6, 9 * MB);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1);
- space->Free(self, ptr1);
+ size_t free1 = space->AllocationSize(ptr1.get());
+ space->Free(self, ptr1.reset(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -288,11 +290,11 @@ TEST_F(SpaceTest, AllocAndFree_RosAllocSpace) {
TEST_F(SpaceTest, LargeObjectTest) {
size_t rand_seed = 0;
for (size_t i = 0; i < 2; ++i) {
- LargeObjectSpace* los = NULL;
+ LargeObjectSpace* los = nullptr;
if (i == 0) {
los = space::LargeObjectMapSpace::Create("large object space");
} else {
- los = space::FreeListSpace::Create("large object space", NULL, 128 * MB);
+ los = space::FreeListSpace::Create("large object space", nullptr, 128 * MB);
}
static const size_t num_allocations = 64;
@@ -304,7 +306,7 @@ TEST_F(SpaceTest, LargeObjectTest) {
size_t request_size = test_rand(&rand_seed) % max_allocation_size;
size_t allocation_size = 0;
mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size);
- ASSERT_TRUE(obj != NULL);
+ ASSERT_TRUE(obj != nullptr);
ASSERT_EQ(allocation_size, los->AllocationSize(obj));
ASSERT_GE(allocation_size, request_size);
// Fill in our magic value.
@@ -337,7 +339,7 @@ TEST_F(SpaceTest, LargeObjectTest) {
size_t bytes_allocated = 0;
// Checks that the coalescing works.
mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated);
- EXPECT_TRUE(obj != NULL);
+ EXPECT_TRUE(obj != nullptr);
los->Free(Thread::Current(), obj);
EXPECT_EQ(0U, los->GetBytesAllocated());
@@ -347,12 +349,13 @@ TEST_F(SpaceTest, LargeObjectTest) {
}
void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
- MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, NULL));
- ASSERT_TRUE(space != NULL);
+ MallocSpace* space(create_space("test", 4 * MB, 16 * MB, 16 * MB, nullptr));
+ ASSERT_TRUE(space != nullptr);
// Make space findable to the heap, will also delete space when runtime is cleaned up
AddSpace(space);
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
// Succeeds, fits without adjusting the max allowed footprint.
mirror::Object* lots_of_objects[1024];
@@ -361,17 +364,16 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
lots_of_objects[i] = space->Alloc(self, size_of_zero_length_byte_array, &allocation_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- InstallClass(lots_of_objects[i], size_of_zero_length_byte_array);
+ SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
+ InstallClass(obj, size_of_zero_length_byte_array);
+ lots_of_objects[i] = obj.get();
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
}
- // Release memory and check pointers are NULL.
- {
- ScopedObjectAccess soa(self);
- space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == nullptr);
- }
+ // Release memory and check pointers are nullptr.
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == nullptr);
}
// Succeeds, fits by adjusting the max allowed footprint.
@@ -379,17 +381,17 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
size_t allocation_size = 0;
lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- InstallClass(lots_of_objects[i], 1024);
+ SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
+ InstallClass(obj, 1024);
+ lots_of_objects[i] = obj.get();
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
}
- // Release memory and check pointers are NULL
- {
- ScopedObjectAccess soa(self);
- space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == nullptr);
- }
+ // Release memory and check that the pointers are nullptr.
+ // TODO: This isn't compaction-safe; fix it.
+ space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
+ for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
+ EXPECT_TRUE(lots_of_objects[i] == nullptr);
}
}
@@ -430,6 +432,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
size_t last_object = 0; // last object for which allocation succeeded
size_t amount_allocated = 0; // amount of space allocated
Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
size_t rand_seed = 123456789;
for (size_t i = 0; i < max_objects; i++) {
size_t alloc_fails = 0; // number of failed allocations
@@ -446,19 +449,19 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
alloc_size = size_of_zero_length_byte_array;
}
}
- mirror::Object* object;
+ SirtRef<mirror::Object> object(self, nullptr);
size_t bytes_allocated = 0;
if (round <= 1) {
- object = space->Alloc(self, alloc_size, &bytes_allocated);
+ object.reset(space->Alloc(self, alloc_size, &bytes_allocated));
} else {
- object = space->AllocWithGrowth(self, alloc_size, &bytes_allocated);
+ object.reset(space->AllocWithGrowth(self, alloc_size, &bytes_allocated));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
- if (object != NULL) { // allocation succeeded
+ if (object.get() != nullptr) { // allocation succeeded
InstallClass(object, alloc_size);
- lots_of_objects.get()[i] = object;
- size_t allocation_size = space->AllocationSize(object);
+ lots_of_objects[i] = object.get();
+ size_t allocation_size = space->AllocationSize(object.get());
EXPECT_EQ(bytes_allocated, allocation_size);
if (object_size > 0) {
EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -489,8 +492,11 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
// Release storage in a semi-adhoc manner
size_t free_increment = 96;
while (true) {
- // Give the space a haircut
- space->Trim();
+ {
+ ScopedThreadStateChange tsc(self, kNative);
+ // Give the space a haircut.
+ space->Trim();
+ }
// Bounds sanity
footprint = space->GetFootprint();
@@ -504,30 +510,28 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
break;
}
- {
- // Free some objects
- ScopedObjectAccess soa(self);
- for (size_t i = 0; i < last_object; i += free_increment) {
- mirror::Object* object = lots_of_objects.get()[i];
- if (object == NULL) {
- continue;
- }
- size_t allocation_size = space->AllocationSize(object);
- if (object_size > 0) {
- EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
- } else {
- EXPECT_GE(allocation_size, 8u);
- }
- space->Free(self, object);
- lots_of_objects.get()[i] = NULL;
- amount_allocated -= allocation_size;
- footprint = space->GetFootprint();
- EXPECT_GE(space->Size(), footprint); // invariant
+ // Free some objects
+ for (size_t i = 0; i < last_object; i += free_increment) {
+ mirror::Object* object = lots_of_objects.get()[i];
+ if (object == nullptr) {
+ continue;
}
-
- free_increment >>= 1;
+ size_t allocation_size = space->AllocationSize(object);
+ if (object_size > 0) {
+ EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
+ } else {
+ EXPECT_GE(allocation_size, 8u);
+ }
+ space->Free(self, object);
+ lots_of_objects.get()[i] = nullptr;
+ amount_allocated -= allocation_size;
+ footprint = space->GetFootprint();
+ EXPECT_GE(space->Size(), footprint); // invariant
}
+
+ free_increment >>= 1;
}
+
// The space has become empty here before allocating a large object
// below. For RosAlloc, revoke thread-local runs, which are kept
// even when empty for a performance reason, so that they won't
@@ -537,15 +541,15 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
space->RevokeAllThreadLocalBuffers();
// All memory was released, try a large allocation to check freed memory is being coalesced
- mirror::Object* large_object;
+ SirtRef<mirror::Object> large_object(self, nullptr);
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
if (round <= 1) {
- large_object = space->Alloc(self, three_quarters_space, &bytes_allocated);
+ large_object.reset(space->Alloc(self, three_quarters_space, &bytes_allocated));
} else {
- large_object = space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated);
+ large_object.reset(space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated));
}
- EXPECT_TRUE(large_object != NULL);
+ EXPECT_TRUE(large_object.get() != nullptr);
InstallClass(large_object, three_quarters_space);
// Sanity check footprint
@@ -555,10 +559,8 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
EXPECT_LE(space->Size(), growth_limit);
// Clean up
- {
- ScopedObjectAccess soa(self);
- space->Free(self, large_object);
- }
+ space->Free(self, large_object.reset(nullptr));
+
// Sanity check footprint
footprint = space->GetFootprint();
EXPECT_LE(footprint, growth_limit);
@@ -574,8 +576,8 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size, Create
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, NULL));
- ASSERT_TRUE(space != NULL);
+ MallocSpace* space(create_space("test", initial_size, growth_limit, capacity, nullptr));
+ ASSERT_TRUE(space != nullptr);
// Basic sanity
EXPECT_EQ(space->Capacity(), growth_limit);
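The dominant change in this test file is wrapping raw mirror::Object* results in SirtRef: the allocations now run under ScopedObjectAccess, where a moving GC may relocate objects, and a SirtRef keeps the pointer registered as a root so the collector can rewrite it. A shape sketch of the handle the tests rely on, including the reset-returns-the-old-referent behavior behind space->Free(self, ptr.reset(nullptr)) (sirt_ref.h is touched by this change; the internals here are assumptions):

    struct Thread;  // Stand-in for art::Thread.

    template <typename T>
    class ScopedRootRef {  // Toy analog of SirtRef.
     public:
      ScopedRootRef(Thread* self, T* object) : self_(self), slot_(object) {
        // The real SirtRef pushes &slot_ onto self's stack indirect reference
        // table here, making it visible to the GC as a root.
      }
      ~ScopedRootRef() {
        // ...and pops it here, ending the object's protection.
      }
      T* get() const { return slot_; }
      T* reset(T* object) {
        T* old = slot_;  // Returning the old referent lets callers free and
        slot_ = object;  // unregister in a single expression.
        return old;
      }

     private:
      Thread* self_;
      T* slot_;  // A moving collector may rewrite this in place.
    };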
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index e37fb61..9e1d915 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -31,7 +31,7 @@ static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFra
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// If both register locations contains the same value, the register probably holds a reference.
int32_t src_value = shadow_frame.GetVReg(src_reg);
- mirror::Object* o = shadow_frame.GetVRegReference<false>(src_reg);
+ mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
if (src_value == reinterpret_cast<intptr_t>(o)) {
new_shadow_frame->SetVRegReference(dest_reg, o);
} else {
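AssignRegister's heuristic deserves a note: the interpreter keeps a primitive view and a reference view of each vreg, and nothing records which one is current. If the two views agree bit-for-bit, the slot most recently held a reference and must be copied as a reference so the GC keeps tracking it; otherwise copying the raw bits would resurrect a stale pointer. A toy model (the real ShadowFrame shares backing storage between the views; this struct is only for illustration):

    #include <cstdint>

    struct VReg {
      int32_t value;  // What GetVReg() returns.
      void* ref;      // What GetVRegReference() returns; stale after a
                      // primitive write clobbered the slot.
    };

    void AssignRegister(VReg& dst, const VReg& src) {
      if (src.value == static_cast<int32_t>(reinterpret_cast<intptr_t>(src.ref))) {
        dst.ref = src.ref;      // Probably a reference: keep it GC-visible.
        dst.value = src.value;
      } else {
        dst.value = src.value;  // Primitive: copy bits, drop the stale reference.
        dst.ref = nullptr;
      }
    }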
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7c5de5e..90aaccd 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,10 +27,13 @@
namespace art {
namespace mirror {
+template<VerifyObjectFlags kVerifyFlags>
inline size_t Array::SizeOf() {
// This is safe from overflow because the array was already allocated, so we know it's sane.
- size_t component_size = GetClass()->GetComponentSize();
- int32_t component_count = GetLength();
+ size_t component_size = GetClass<kVerifyFlags>()->GetComponentSize();
+ // No need to re-check this; the GetClass() call above already verified it.
+ int32_t component_count =
+ GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
size_t header_size = sizeof(Object) + (component_size == sizeof(int64_t) ? 8 : 4);
size_t data_size = component_count * component_size;
return header_size + data_size;
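The size computation above, as a runnable worked example under a 32-bit layout: the header is the 8-byte Object header (class pointer plus lock word) plus the 4-byte length field, widened to 8 bytes for 64-bit components so the element data starts 8-byte aligned. The header sizes are assumptions consistent with the formula shown:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    size_t ArraySizeOf(size_t component_size, int32_t component_count) {
      const size_t object_header = 8;  // klass_ + monitor_, 4 bytes each (assumed).
      // The 4-byte length field, padded to 8 for int64-sized components.
      const size_t header_size =
          object_header + (component_size == sizeof(int64_t) ? 8 : 4);
      return header_size + component_count * component_size;
    }

    int main() {
      assert(ArraySizeOf(sizeof(int32_t), 3) == 24);  // int[3]:  12 + 12
      assert(ArraySizeOf(sizeof(int64_t), 2) == 32);  // long[2]: 16 + 16
    }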
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 7555975..c4f9a75 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -53,17 +53,18 @@ class MANAGED Array : public Object {
const SirtRef<IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Array, length_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_), false);
}
void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK_GE(length, 0);
// We use non transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false, false);
+ SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false);
}
static MemberOffset LengthOffset() {
@@ -94,8 +95,10 @@ class MANAGED Array : public Object {
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(GetLength()))) {
+ if (UNLIKELY(static_cast<uint32_t>(index) >=
+ static_cast<uint32_t>(GetLength<kVerifyFlags>()))) {
ThrowArrayIndexOutOfBoundsException(index);
return false;
}
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index d347724..d5eccaf 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -215,6 +215,13 @@ inline bool ArtMethod::IsImtConflictMethod() {
DCHECK(!result || IsRuntimeMethod());
return result;
}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline void ArtMethod::SetNativeMethod(const void* native_method) {
+ SetFieldPtr<false, true, kVerifyFlags>(
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method, false);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 20d2b18..fe27992 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -360,10 +360,5 @@ void ArtMethod::UnregisterNative(Thread* self) {
RegisterNative(self, GetJniDlsymLookupStub(), false);
}
-void ArtMethod::SetNativeMethod(const void* native_method) {
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_),
- native_method, false);
-}
-
} // namespace mirror
} // namespace art
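SetNativeMethod moves from art_method.cc into art_method-inl.h because it becomes a template: the definition must be visible in every translation unit that instantiates a new kVerifyFlags value, which an out-of-line definition in a .cc file cannot provide without explicit instantiation. The general pattern, reduced to a sketch with hypothetical names:

    // In the header: declaration only, usable from any include site.
    struct Widget {
      template <int kFlags = 0>
      void SetPointer(const void* p);
    };

    // In the -inl.h header, included wherever SetPointer<> is instantiated.
    // Defining this in a .cc file would link only for flag values explicitly
    // instantiated there.
    template <int kFlags>
    inline void Widget::SetPointer(const void* p) {
      // Flag-dependent verification would go here; elided in this sketch.
      (void)p;
    }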
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 86f5348..e17dc5f 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -214,40 +214,48 @@ class MANAGED ArtMethod : public Object {
void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
EntryPointFromInterpreter* GetEntryPointFromInterpreter() {
- return GetFieldPtr<EntryPointFromInterpreter*>(
+ return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) {
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
- entry_point_from_interpreter, false);
+ SetFieldPtr<false, true, kVerifyFlags>(
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
+ entry_point_from_interpreter, false);
}
static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromPortableCompiledCode() {
- return GetFieldPtr<const void*>(EntryPointFromPortableCompiledCodeOffset(), false);
+ return GetFieldPtr<const void*, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(), false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code) {
- SetFieldPtr<false>(EntryPointFromPortableCompiledCodeOffset(),
- entry_point_from_portable_compiled_code, false);
+ SetFieldPtr<false, true, kVerifyFlags>(
+ EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code, false);
}
static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromQuickCompiledCode() {
- return GetFieldPtr<const void*>(EntryPointFromQuickCompiledCodeOffset(), false);
+ return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset(), false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
- SetFieldPtr<false>(EntryPointFromQuickCompiledCodeOffset(),
- entry_point_from_quick_compiled_code, false);
+ SetFieldPtr<false, true, kVerifyFlags>(
+ EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code, false);
}
@@ -279,9 +287,10 @@ class MANAGED ArtMethod : public Object {
false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetMappingTable(const uint8_t* mapping_table) {
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_),
- mapping_table, false);
+ SetFieldPtr<false, true, kVerifyFlags>(
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_), mapping_table, false);
}
uint32_t GetOatMappingTableOffset();
@@ -294,8 +303,10 @@ class MANAGED ArtMethod : public Object {
false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetVmapTable(const uint8_t* vmap_table) {
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_), vmap_table, false);
+ SetFieldPtr<false, true, kVerifyFlags>(
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_), vmap_table, false);
}
uint32_t GetOatVmapTableOffset();
@@ -305,8 +316,10 @@ class MANAGED ArtMethod : public Object {
const uint8_t* GetNativeGcMap() {
return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetNativeGcMap(const uint8_t* data) {
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data, false);
+ SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data,
+ false);
}
// When building the oat need a convenient place to stuff the offset of the native GC map.
@@ -350,6 +363,7 @@ class MANAGED ArtMethod : public Object {
return reinterpret_cast<const void*>(GetField32(NativeMethodOffset(), false));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetNativeMethod(const void*);
static MemberOffset GetMethodIndexOffset() {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index cba221d..e82c393 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -85,6 +85,7 @@ inline uint32_t Class::NumDirectMethods() {
return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
}
+template<VerifyObjectFlags kVerifyFlags>
inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() {
DCHECK(IsLoaded() || IsErroneous());
return GetFieldObject<ObjectArray<ArtMethod> >(
@@ -103,8 +104,9 @@ inline uint32_t Class::NumVirtualMethods() {
return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
}
+template<VerifyObjectFlags kVerifyFlags>
inline ArtMethod* Class::GetVirtualMethod(uint32_t i) {
- DCHECK(IsResolved() || IsErroneous());
+ DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
return GetVirtualMethods()->Get(i);
}
@@ -415,14 +417,16 @@ inline void Class::SetVerifyErrorClass(Class* klass) {
}
}
+template<VerifyObjectFlags kVerifyFlags>
inline uint32_t Class::GetAccessFlags() {
// Check that the class is loaded, or that this is java.lang.String, which has a circularity
// issue while loading the names of its members.
- DCHECK(IsLoaded() || IsErroneous() ||
+ DCHECK(IsLoaded<kVerifyFlags>() ||
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
this == String::GetJavaLangString() ||
this == ArtField::GetJavaLangReflectArtField() ||
this == ArtMethod::GetJavaLangReflectArtMethod());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
}
inline String* Class::GetName() {
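
GetAccessFlags() above shows the idiom used throughout this change: once a method has verified `this` (via the kVerifyThis bit in its own flags), nested calls on the same object strip that bit so the object is not re-verified, while read/write verification passes through. A condensed sketch of the pattern, with `Example` and `SomeOffset` purely illustrative:

    template<VerifyObjectFlags kVerifyFlags>
    inline uint32_t Example() {
      // Drop only the self-check bit; kVerifyReads/kVerifyWrites pass through.
      constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
      DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kNewFlags>());  // 'this' checked once.
      return GetField32<kNewFlags>(SomeOffset(), false);
    }
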
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index cd8504b..43db996 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -120,9 +120,11 @@ class MANAGED Class : public Object {
kStatusMax = 10,
};
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_EQ(sizeof(Status), sizeof(uint32_t));
- return static_cast<Status>(GetField32(OFFSET_OF_OBJECT_MEMBER(Class, status_), true));
+ COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
+ return static_cast<Status>(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_),
+ true));
}
void SetStatus(Status new_status, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -132,45 +134,54 @@ class MANAGED Class : public Object {
}
// Returns true if the class has failed to link.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() == kStatusError;
+ return GetStatus<kVerifyFlags>() == kStatusError;
}
// Returns true if the class has been loaded at least to the point where its superclass and
// interface type indices are recorded.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusIdx;
+ return GetStatus<kVerifyFlags>() >= kStatusIdx;
}
// Returns true if the class has been loaded.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusLoaded;
+ return GetStatus<kVerifyFlags>() >= kStatusLoaded;
}
// Returns true if the class has been linked.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusResolved;
+ return GetStatus<kVerifyFlags>() >= kStatusResolved;
}
// Returns true if the class was compile-time verified.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusRetryVerificationAtRuntime;
+ return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusVerified;
+ return GetStatus<kVerifyFlags>() >= kStatusVerified;
}
// Returns true if the class is initializing.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() >= kStatusInitializing;
+ return GetStatus<kVerifyFlags>() >= kStatusInitializing;
}
// Returns true if the class is initialized.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetStatus() == kStatusInitialized;
+ return GetStatus<kVerifyFlags>() == kStatusInitialized;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -217,24 +228,29 @@ class MANAGED Class : public Object {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccClassIsReference) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccClassIsWeakReference) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccReferenceFlagsMask) == kAccClassIsReference;
+ return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccClassIsFinalizerReference) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccClassIsPhantomReference) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0;
}
// Can references of this type be assigned to by things of another type? For non-array types
@@ -260,7 +276,7 @@ class MANAGED Class : public Object {
// Computes the name, then sets the cached value.
String* ComputeName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsProxyClass() {
+ bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Read access flags without using the getter, as whether something is a proxy can be checked
// in any loaded state.
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -268,10 +284,11 @@ class MANAGED Class : public Object {
return (access_flags & kAccClassIsProxy) != 0;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Primitive::Type GetPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
return static_cast<Primitive::Type>(
- GetField32(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false));
+ GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false));
}
void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -280,48 +297,61 @@ class MANAGED Class : public Object {
}
// Returns true if the class is a primitive type.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() != Primitive::kPrimNot;
+ return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimBoolean;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimByte;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimChar;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimShort;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetPrimitiveType() == Primitive::kPrimInt;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimLong;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimFloat;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimDouble;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetPrimitiveType() == Primitive::kPrimVoid;
+ return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return IsArrayClass() && GetComponentType()->IsPrimitive();
+ return IsArrayClass<kVerifyFlags>() &&
+ GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
+ IsPrimitive();
}
// Depth of class from java.lang.Object
@@ -333,8 +363,9 @@ class MANAGED Class : public Object {
return depth;
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType() != NULL;
+ return GetComponentType<kVerifyFlags>() != NULL;
}
bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -351,8 +382,9 @@ class MANAGED Class : public Object {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Class>(ComponentTypeOffset(), false);
+ return GetFieldObject<Class, kVerifyFlags>(ComponentTypeOffset(), false);
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -376,8 +408,9 @@ class MANAGED Class : public Object {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || (IsAbstract() && IsArrayClass());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType() != NULL && !GetComponentType()->IsPrimitive();
+ return GetComponentType<kVerifyFlags>() != nullptr &&
+     !GetComponentType<kVerifyFlags>()->IsPrimitive();
}
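
A hedged usage sketch for these templated predicates: a hypothetical GC visitor that has already validated `obj` can interrogate its class without re-triggering verification on every query.

    mirror::Class* klass = obj->GetClass<kVerifyNone>();
    if (klass->IsObjectArrayClass<kVerifyNone>()) {
      // ... visit the array's reference elements ...
    }
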
// Creates a raw object instance but does not invoke the default constructor.
@@ -396,12 +429,14 @@ class MANAGED Class : public Object {
return IsClassClass() || IsArrayClass();
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
}
void SetClassSize(uint32_t new_class_size)
@@ -549,6 +584,7 @@ class MANAGED Class : public Object {
// Returns the number of static, private, and constructor methods.
uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ObjectArray<ArtMethod>* GetVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods)
@@ -557,6 +593,7 @@ class MANAGED Class : public Object {
// Returns the number of non-inherited virtual methods.
uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtMethod* GetVirtualMethodDuringLinking(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -683,9 +720,11 @@ class MANAGED Class : public Object {
false);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetReferenceInstanceOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_), false);
+ DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
+ false);
}
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
@@ -725,8 +764,10 @@ class MANAGED Class : public Object {
// TODO: uint16_t
void SetStaticField(uint32_t i, ArtField* f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetReferenceStaticOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_),
+ false);
}
void SetReferenceStaticOffsets(uint32_t new_reference_offsets)
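
The flag type itself is defined in runtime/verify_object.h, which appears in the diffstat but not in this excerpt. Judging from how the bits are tested and masked above, it is presumably a bitmask along these lines (a reconstruction, not the verbatim file):

    enum VerifyObjectFlags {
      kVerifyNone = 0x0,
      kVerifyThis = 0x1,    // Verify the receiver of the operation.
      kVerifyReads = 0x2,   // Verify objects read out of fields.
      kVerifyWrites = 0x4,  // Verify objects written into fields.
      kVerifyAll = kVerifyThis | kVerifyReads | kVerifyWrites,
    };
    // kDefaultVerifyFlags (the template default used everywhere above)
    // selects the build's default verification level.
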
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 70291c1..df8104d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -32,18 +32,21 @@
namespace art {
namespace mirror {
+template<VerifyObjectFlags kVerifyFlags>
inline Class* Object::GetClass() {
- return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+ return GetFieldObject<Class, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
}
+template<VerifyObjectFlags kVerifyFlags>
inline void Object::SetClass(Class* new_klass) {
// new_klass may be NULL prior to class linker initialization.
// We don't mark the card since this write occurs as part of object allocation. Not all objects
// have backing cards, such as large objects.
// We use the non-transactional version since we can't undo this write. We also disable checking
// as we may run in transaction mode here.
- SetFieldObjectWithoutWriteBarrier<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, klass_),
- new_klass, false, false);
+ SetFieldObjectWithoutWriteBarrier<false, false,
+ static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(
+ OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false);
}
inline LockWord Object::GetLockWord() {
@@ -89,176 +92,222 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
Monitor::Wait(self, this, ms, ns, true, kTimedWaiting);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::VerifierInstanceOf(Class* klass) {
DCHECK(klass != NULL);
- DCHECK(GetClass() != NULL);
+ DCHECK(GetClass<kVerifyFlags>() != NULL);
return klass->IsInterface() || InstanceOf(klass);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::InstanceOf(Class* klass) {
DCHECK(klass != NULL);
- DCHECK(GetClass() != NULL);
- return klass->IsAssignableFrom(GetClass());
+ DCHECK(GetClass<kVerifyNone>() != NULL);
+ return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsClass() {
- Class* java_lang_Class = GetClass()->GetClass();
- return GetClass() == java_lang_Class;
+ Class* java_lang_Class = GetClass<kVerifyFlags>()->GetClass();
+ return GetClass<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ==
+ java_lang_Class;
}
+template<VerifyObjectFlags kVerifyFlags>
inline Class* Object::AsClass() {
- DCHECK(IsClass());
+ DCHECK(IsClass<kVerifyFlags>());
return down_cast<Class*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsObjectArray() {
- return IsArrayInstance() && !GetClass()->GetComponentType()->IsPrimitive();
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ return IsArrayInstance<kVerifyFlags>() &&
+ !GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitive();
}
-template<class T>
+template<class T, VerifyObjectFlags kVerifyFlags>
inline ObjectArray<T>* Object::AsObjectArray() {
- DCHECK(IsObjectArray());
+ DCHECK(IsObjectArray<kVerifyFlags>());
return down_cast<ObjectArray<T>*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsArrayInstance() {
- return GetClass()->IsArrayClass();
+ return GetClass<kVerifyFlags>()->IsArrayClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsArtField() {
- return GetClass()->IsArtFieldClass();
+ return GetClass<kVerifyFlags>()->IsArtFieldClass();
}
-inline ArtField* Object::AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsArtField());
+template<VerifyObjectFlags kVerifyFlags>
+inline ArtField* Object::AsArtField() {
+ DCHECK(IsArtField<kVerifyFlags>());
return down_cast<ArtField*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsArtMethod() {
- return GetClass()->IsArtMethodClass();
+ return GetClass<kVerifyFlags>()->IsArtMethodClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline ArtMethod* Object::AsArtMethod() {
- DCHECK(IsArtMethod());
+ DCHECK(IsArtMethod<kVerifyFlags>());
return down_cast<ArtMethod*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsReferenceInstance() {
- return GetClass()->IsReferenceClass();
+ return GetClass<kVerifyFlags>()->IsReferenceClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline Array* Object::AsArray() {
- DCHECK(IsArrayInstance());
+ DCHECK(IsArrayInstance<kVerifyFlags>());
return down_cast<Array*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline BooleanArray* Object::AsBooleanArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveBoolean());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->GetComponentType()->IsPrimitiveBoolean());
return down_cast<BooleanArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte());
return down_cast<ByteArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteSizedArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveByte() ||
- GetClass()->GetComponentType()->IsPrimitiveBoolean());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() ||
+ GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean());
return down_cast<ByteArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline CharArray* Object::AsCharArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveChar());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
return down_cast<CharArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline ShortArray* Object::AsShortArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort());
return down_cast<ShortArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline ShortArray* Object::AsShortSizedArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveShort() ||
- GetClass()->GetComponentType()->IsPrimitiveChar());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort() ||
+ GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
return down_cast<ShortArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline IntArray* Object::AsIntArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveInt() ||
- GetClass()->GetComponentType()->IsPrimitiveFloat());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveInt() ||
+ GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
return down_cast<IntArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline LongArray* Object::AsLongArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveLong() ||
- GetClass()->GetComponentType()->IsPrimitiveDouble());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveLong() ||
+ GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
return down_cast<LongArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline FloatArray* Object::AsFloatArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveFloat());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
return down_cast<FloatArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline DoubleArray* Object::AsDoubleArray() {
- DCHECK(GetClass()->IsArrayClass());
- DCHECK(GetClass()->GetComponentType()->IsPrimitiveDouble());
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
+ DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
return down_cast<DoubleArray*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline String* Object::AsString() {
- DCHECK(GetClass()->IsStringClass());
+ DCHECK(GetClass<kVerifyFlags>()->IsStringClass());
return down_cast<String*>(this);
}
+template<VerifyObjectFlags kVerifyFlags>
inline Throwable* Object::AsThrowable() {
- DCHECK(GetClass()->IsThrowableClass());
+ DCHECK(GetClass<kVerifyFlags>()->IsThrowableClass());
return down_cast<Throwable*>(this);
}
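
All of the As<T>() conversions follow the same checked-downcast shape: a DCHECK on the dynamic type, parameterized by the caller's verification flags, followed by a zero-cost down_cast. Callers choose the level per call site:

    mirror::String* s = obj->AsString<kVerifyNone>();  // no verification at all
    mirror::IntArray* a = obj->AsIntArray();           // default: kDefaultVerifyFlags
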
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsWeakReferenceInstance() {
- return GetClass()->IsWeakReferenceClass();
+ return GetClass<kVerifyFlags>()->IsWeakReferenceClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsSoftReferenceInstance() {
- return GetClass()->IsSoftReferenceClass();
+ return GetClass<kVerifyFlags>()->IsSoftReferenceClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsFinalizerReferenceInstance() {
- return GetClass()->IsFinalizerReferenceClass();
+ return GetClass<kVerifyFlags>()->IsFinalizerReferenceClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsPhantomReferenceInstance() {
- return GetClass()->IsPhantomReferenceClass();
+ return GetClass<kVerifyFlags>()->IsPhantomReferenceClass();
}
+template<VerifyObjectFlags kVerifyFlags>
inline size_t Object::SizeOf() {
size_t result;
- if (IsArrayInstance()) {
- result = AsArray()->SizeOf();
- } else if (IsClass()) {
- result = AsClass()->SizeOf();
+ constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ if (IsArrayInstance<kVerifyFlags>()) {
+ result = AsArray<kNewFlags>()->SizeOf<kNewFlags>();
+ } else if (IsClass<kNewFlags>()) {
+ result = AsClass<kNewFlags>()->SizeOf<kNewFlags>();
} else {
- result = GetClass()->GetObjectSize();
+ result = GetClass<kNewFlags>()->GetObjectSize();
}
- DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(GetClass());
- DCHECK(!IsArtField() || result == sizeof(ArtField));
- DCHECK(!IsArtMethod() || result == sizeof(ArtMethod));
+ DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(GetClass<kNewFlags>());
+ DCHECK(!IsArtField<kNewFlags>() || result == sizeof(ArtField));
+ DCHECK(!IsArtMethod<kNewFlags>() || result == sizeof(ArtMethod));
return result;
}
+template<VerifyObjectFlags kVerifyFlags>
inline int32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) {
- VerifyObject(this);
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
if (UNLIKELY(is_volatile)) {
@@ -270,9 +319,8 @@ inline int32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) {
}
}
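
Because kVerifyFlags is a compile-time template constant, the new `if (kVerifyFlags & kVerifyThis)` guards are constant-folded: instantiations with kVerifyNone emit no verification code at all, so hot paths pay nothing for the hook. For example:

    // No VerifyObject call is compiled into this instantiation.
    int32_t raw = obj->GetField32<kVerifyNone>(field_offset, false);
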
-template<bool kTransactionActive, bool kCheckTransaction>
-inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile,
- bool this_is_valid) {
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -280,7 +328,7 @@ inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, boo
Runtime::Current()->RecordWriteField32(this, field_offset, GetField32(field_offset, is_volatile),
is_volatile);
}
- if (this_is_valid) {
+ if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
@@ -294,7 +342,7 @@ inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, boo
}
}
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField32(MemberOffset field_offset, int32_t old_value, int32_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
@@ -302,14 +350,19 @@ inline bool Object::CasField32(MemberOffset field_offset, int32_t old_value, int
if (kTransactionActive) {
Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
}
- VerifyObject(this);
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw_addr);
return __sync_bool_compare_and_swap(addr, old_value, new_value);
}
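
A hedged sketch of the intended CasField32 usage, a standard read-compute-CAS retry loop; `Compute` and `field_offset` are illustrative:

    int32_t old_value, new_value;
    do {
      old_value = obj->GetField32<kVerifyNone>(field_offset, true);  // volatile read
      new_value = Compute(old_value);                                // hypothetical update
    } while (!obj->CasField32<false>(field_offset, old_value, new_value));
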
+template<VerifyObjectFlags kVerifyFlags>
inline int64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) {
- VerifyObject(this);
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
if (UNLIKELY(is_volatile)) {
@@ -321,9 +374,8 @@ inline int64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) {
}
}
-template<bool kTransactionActive, bool kCheckTransaction>
-inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile,
- bool this_is_valid) {
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -331,7 +383,7 @@ inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, boo
Runtime::Current()->RecordWriteField64(this, field_offset, GetField64(field_offset, is_volatile),
is_volatile);
}
- if (this_is_valid) {
+ if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
@@ -349,7 +401,7 @@ inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, boo
}
}
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField64(MemberOffset field_offset, int64_t old_value, int64_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
@@ -357,15 +409,19 @@ inline bool Object::CasField64(MemberOffset field_offset, int64_t old_value, int
if (kTransactionActive) {
Runtime::Current()->RecordWriteField64(this, field_offset, old_value, true);
}
- VerifyObject(this);
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(raw_addr);
return QuasiAtomic::Cas64(old_value, new_value, addr);
}
-template<class T>
+template<class T, VerifyObjectFlags kVerifyFlags>
inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
- VerifyObject(this);
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
HeapReference<T> objref = *objref_addr;
@@ -374,13 +430,15 @@ inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
QuasiAtomic::MembarLoadLoad(); // Ensure loads don't re-order.
}
T* result = objref.AsMirrorPtr();
- VerifyObject(result);
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(result);
+ }
return result;
}
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
- bool is_volatile, bool this_is_valid) {
+ bool is_volatile) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -389,10 +447,12 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
GetFieldObject<Object>(field_offset, is_volatile),
true);
}
- if (this_is_valid) {
+ if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
- VerifyObject(new_value);
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
HeapReference<Object> objref(HeapReference<Object>::FromMirrorPtr(new_value));
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
@@ -405,27 +465,43 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
}
}
-template<bool kTransactionActive, bool kCheckTransaction>
-inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile,
- bool this_is_valid) {
- SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction>(field_offset, new_value,
- is_volatile,
- this_is_valid);
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile) {
+ SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, new_value, is_volatile);
if (new_value != nullptr) {
CheckFieldAssignment(field_offset, new_value);
Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
}
}
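
The split between SetFieldObject and SetFieldObjectWithoutWriteBarrier is now explicit in the template signatures: the former performs the reference store and then dirties the card for the concurrent GC, while the barrier-free variant is reserved for stores the collector can never observe mid-setup, such as installing klass_ during allocation (see SetClass above). A hedged contrast, with `offset` and `value` illustrative:

    obj->SetFieldObject<false>(offset, value, false);                            // store + card mark
    obj->SetFieldObjectWithoutWriteBarrier<false, false>(offset, value, false);  // allocation-time only
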
-template<bool kTransactionActive, bool kCheckTransaction>
-inline bool Object::CasFieldObject(MemberOffset field_offset, Object* old_value, Object* new_value) {
+template <VerifyObjectFlags kVerifyFlags>
+inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset field_offset) {
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
+ field_offset.Int32Value());
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline bool Object::CasFieldObject(MemberOffset field_offset, Object* old_value,
+ Object* new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(new_value);
+ }
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(old_value);
+ }
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- VerifyObject(this);
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw_addr);
HeapReference<Object> old_ref(HeapReference<Object>::FromMirrorPtr(old_value));
@@ -437,12 +513,6 @@ inline bool Object::CasFieldObject(MemberOffset field_offset, Object* old_value,
return success;
}
-inline void Object::VerifyObject(Object* obj) {
- if (kIsDebugBuild) {
- Runtime::Current()->GetHeap()->VerifyObject(obj);
- }
-}
-
} // namespace mirror
} // namespace art
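
The private static Object::VerifyObject removed above moves to a free function in runtime/verify_object-inl.h (per the diffstat), so non-mirror code such as the JNI and stack layers below can call it directly. Presumably it keeps the old behavior behind the new gating, roughly:

    // Reconstruction under stated assumptions; not the verbatim file.
    inline void VerifyObject(mirror::Object* obj) {
      if (kIsDebugBuild && obj != nullptr &&
          Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
        Runtime::Current()->GetHeap()->VerifyObject(obj);
      }
    }
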
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 1251852..f1485e5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -141,10 +141,9 @@ int32_t Object::IdentityHashCode() const {
void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) {
Class* c = GetClass();
- if (Runtime::Current()->GetClassLinker() == NULL ||
- !Runtime::Current()->IsStarted() ||
- !Runtime::Current()->GetHeap()->IsObjectValidationEnabled() ||
- !c->IsResolved()) {
+ Runtime* runtime = Runtime::Current();
+ if (runtime->GetClassLinker() == nullptr || !runtime->IsStarted() ||
+ !runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index eb118c7..7487dd2 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -24,6 +24,7 @@
#include "object_reference.h"
#include "offsets.h"
#include "runtime.h"
+#include "verify_object.h"
namespace art {
@@ -59,7 +60,8 @@ class Throwable;
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
MemberOffset(OFFSETOF_MEMBER(type, field))
-constexpr bool kCheckFieldAssignments = false;
+// Checks that we don't do field assignments which violate the typing system.
+static constexpr bool kCheckFieldAssignments = false;
// C++ mirror of java.lang.Object
class MANAGED Object {
@@ -68,16 +70,20 @@ class MANAGED Object {
return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -95,135 +101,147 @@ class MANAGED Object {
mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCK_FUNCTION(monitor_lock_);
-
bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
UNLOCK_FUNCTION(monitor_lock_);
-
void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- template<class T>
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtField* AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessor for Java type fields.
- template<class T> T* GetFieldObject(MemberOffset field_offset, bool is_volatile)
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ T* GetFieldObject(MemberOffset field_offset, bool is_volatile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
- bool is_volatile, bool this_is_valid = true)
+ bool is_volatile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true>
- void SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile,
- bool this_is_valid = true)
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldObject(MemberOffset field_offset, Object* old_value, Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset) ALWAYS_INLINE {
- VerifyObject(this);
- return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<byte*>(this) +
- field_offset.Int32Value());
- }
-
- int32_t GetField32(MemberOffset field_offset, bool is_volatile);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
- template<bool kTransactionActive, bool kCheckTransaction = true>
- void SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile,
- bool this_is_valid = true);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ int32_t GetField32(MemberOffset field_offset, bool is_volatile)
+ NO_THREAD_SAFETY_ANALYSIS;
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasField32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int64_t GetField64(MemberOffset field_offset, bool is_volatile);
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile);
- template<bool kTransactionActive, bool kCheckTransaction = true>
- void SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile,
- bool this_is_valid = true);
-
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasField64(MemberOffset field_offset, int64_t old_value, int64_t new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<bool kTransactionActive, bool kCheckTransaction = true, typename T>
- void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile,
- bool this_is_valid = true) {
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+ void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile) {
#ifndef __LP64__
- SetField32<kTransactionActive, kCheckTransaction>(field_offset,
- reinterpret_cast<int32_t>(new_value),
- is_volatile, this_is_valid);
+ SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, reinterpret_cast<int32_t>(new_value), is_volatile);
#else
- SetField64<kTransactionActive, kCheckTransaction>(field_offset,
- reinterpret_cast<int64_t>(new_value),
- is_volatile, this_is_valid);
+ SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ field_offset, reinterpret_cast<int64_t>(new_value), is_volatile);
#endif
}
protected:
// Accessors for non-Java type fields
- template<class T>
- T GetFieldPtr(MemberOffset field_offset, bool is_volatile) {
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ T GetFieldPtr(MemberOffset field_offset, bool is_volatile) NO_THREAD_SAFETY_ANALYSIS {
#ifndef __LP64__
- return reinterpret_cast<T>(GetField32(field_offset, is_volatile));
+ return reinterpret_cast<T>(GetField32<kVerifyFlags>(field_offset, is_volatile));
#else
- return reinterpret_cast<T>(GetField64(field_offset, is_volatile));
+ return reinterpret_cast<T>(GetField64<kVerifyFlags>(field_offset, is_volatile));
#endif
}
private:
- static void VerifyObject(Object* obj) ALWAYS_INLINE;
// Verify the type correctness of stores to fields.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 521b6ce..a427957 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -58,10 +58,10 @@ inline T* ObjectArray<T>::Get(int32_t i) {
return GetFieldObject<T>(OffsetOfElement(i), false);
}
-template<class T>
+template<class T> template<VerifyObjectFlags kVerifyFlags>
inline bool ObjectArray<T>::CheckAssignable(T* object) {
if (object != NULL) {
- Class* element_class = GetClass()->GetComponentType();
+ Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
ThrowArrayStoreException(object);
return false;
@@ -80,31 +80,33 @@ inline void ObjectArray<T>::Set(int32_t i, T* object) {
}
template<class T>
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void ObjectArray<T>::Set(int32_t i, T* object) {
- if (LIKELY(CheckIsValidIndex(i) && CheckAssignable(object))) {
- SetFieldObject<kTransactionActive, kCheckTransaction>(OffsetOfElement(i), object, false);
+ if (LIKELY(CheckIsValidIndex(i) && CheckAssignable<kVerifyFlags>(object))) {
+ SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object,
+ false);
} else {
DCHECK(Thread::Current()->IsExceptionPending());
}
}
template<class T>
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
- DCHECK(CheckIsValidIndex(i));
- DCHECK(CheckAssignable(object));
- SetFieldObject<kTransactionActive, kCheckTransaction>(OffsetOfElement(i), object, false);
+ DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
+ DCHECK(CheckAssignable<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(object));
+ SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object,
+ false);
}
template<class T>
-template<bool kTransactionActive, bool kCheckTransaction>
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, T* object) {
- DCHECK(CheckIsValidIndex(i));
+ DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
// TODO: enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
// DCHECK(CheckAssignable(object));
- SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction>(OffsetOfElement(i),
- object, false);
+ SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ OffsetOfElement(i), object, false);
}
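
A hedged sketch of why SetWithoutChecksAndWriteBarrier takes the flags: ImageWriter::FixupObjectArray (mentioned in the TODO above) rewrites references inside copies that live in the image buffer, so it would instantiate with verification fully off; `fixed_up_ref` is illustrative:

    dest->SetWithoutChecksAndWriteBarrier<false, false, kVerifyNone>(i, fixed_up_ref);
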
template<class T>
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 668b276..7f9e716 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -37,22 +37,27 @@ class MANAGED ObjectArray : public Array {
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
- bool CheckAssignable(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// Set an element without bounds or element-type checks, to be used in limited
// circumstances, such as during boot image writing.
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
- template<bool kTransactionActive, bool kCheckTransaction = true>
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index c401d50..f95664b 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -20,6 +20,7 @@
#include "scoped_fast_native_object_access.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
+#include "verify_object-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 011e165..2665a08 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -24,6 +24,7 @@
#include "ScopedUtfChars.h"
#include "thread.h"
#include "thread_list.h"
+#include "verify_object-inl.h"
namespace art {
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 809369a..1266c41 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -20,6 +20,7 @@
#include "mirror/object_array.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"
+#include "verify_object-inl.h"
namespace art {
diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h
index 1658d96..b5ee748 100644
--- a/runtime/native/scoped_fast_native_object_access.h
+++ b/runtime/native/scoped_fast_native_object_access.h
@@ -21,6 +21,7 @@
#include "jni_internal.h"
#include "thread-inl.h"
#include "mirror/art_method.h"
+#include "verify_object.h"
namespace art {
@@ -79,9 +80,7 @@ class ScopedFastNativeObjectAccess {
return NULL;
}
- if (kIsDebugBuild) {
- Runtime::Current()->GetHeap()->VerifyObject(obj);
- }
+ VerifyObject(obj);
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 1ca6c4e..2f959db 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "thread-inl.h"
+#include "verify_object.h"
namespace art {
@@ -165,9 +166,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
return NULL;
}
- if (kIsDebugBuild) {
- Runtime::Current()->GetHeap()->VerifyObject(obj);
- }
+ VerifyObject(obj);
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
index 3c5e4f8..b22e816 100644
--- a/runtime/sirt_ref.h
+++ b/runtime/sirt_ref.h
@@ -45,8 +45,11 @@ class SirtRef {
return down_cast<T*>(sirt_.GetReference(0));
}
- void reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Returns the old reference.
+ T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* old_ref = get();
sirt_.SetReference(0, object);
+ return old_ref;
}
private:
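
With reset() now returning the displaced reference, callers no longer need a separate get() before swapping. A small usage sketch, with names illustrative:

    SirtRef<mirror::Object> sirt(self, obj);
    mirror::Object* displaced = sirt.reset(new_obj);  // old referent in one step
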
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 864b86a..a6a0b29 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -25,14 +25,11 @@
#include "runtime.h"
#include "thread_list.h"
#include "throw_location.h"
+#include "verify_object-inl.h"
#include "vmap_table.h"
namespace art {
-bool ShadowFrame::VerifyReference(const mirror::Object* val) const {
- return !Runtime::Current()->GetHeap()->IsInTempSpace(val);
-}
-
mirror::Object* ShadowFrame::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 7e9889e..6a62922 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -22,7 +22,9 @@
#include "base/casts.h"
#include "base/macros.h"
#include "arch/context.h"
+#include "mirror/object.h"
#include "mirror/object_reference.h"
+#include "verify_object.h"
#include <stdint.h>
#include <string>
@@ -213,26 +215,20 @@ class ShadowFrame {
return *reinterpret_cast<unaligned_double*>(vreg);
}
- template <bool kChecked = false>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
+ mirror::Object* ref;
if (HasReferenceArray()) {
- mirror::Object* ref = References()[i].AsMirrorPtr();
- if (kChecked) {
- CHECK(VerifyReference(ref)) << "VReg " << i << "(" << ref
- << ") is in protected space, reference array " << true;
- }
- return ref;
+ ref = References()[i].AsMirrorPtr();
} else {
const uint32_t* vreg_ptr = &vregs_[i];
- mirror::Object* ref =
- reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
- if (kChecked) {
- CHECK(VerifyReference(ref)) << "VReg " << i
- << "(" << ref << ") is in protected space, reference array " << false;
- }
- return ref;
+ ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
}
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(ref);
+ }
+ return ref;
}
// Get view of vregs as range of consecutive arguments starting at i.
@@ -290,10 +286,12 @@ class ShadowFrame {
}
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
- DCHECK(!kMovingCollector || VerifyReference(val))
- << "VReg " << i << "(" << val << ") is in protected space";
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(val);
+ }
uint32_t* vreg = &vregs_[i];
reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
if (HasReferenceArray()) {
@@ -374,8 +372,6 @@ class ShadowFrame {
return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
}
- bool VerifyReference(const mirror::Object* val) const;
-
StackReference<mirror::Object>* References() {
return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
}
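
Replacing the old bool kChecked parameter with VerifyObjectFlags gives the vreg accessors the same verification vocabulary as the rest of the runtime, and makes the verification cost a compile-time, per-call-site decision. A standalone sketch of the gating pattern (mock names and types; only the flag-and-test shape is taken from the hunk above):

#include <cstddef>
#include <cstdio>

// Mock flags with the same shape as VerifyObjectFlags (values illustrative).
enum VerifyFlags { kNone = 0x0, kReads = 0x2, kWrites = 0x4 };

void Verify(void* ref) { std::printf("verifying %p\n", ref); }

template <VerifyFlags kFlags = kNone>
void* GetVReg(void** vregs, size_t i) {
  void* ref = vregs[i];
  if (kFlags & kReads) {  // constant-folded: the kNone instantiation has no branch
    Verify(ref);
  }
  return ref;
}

int main() {
  int x = 0;
  void* vregs[1] = {&x};
  GetVReg(vregs, 0);          // default: no verification code emitted
  GetVReg<kReads>(vregs, 0);  // opt-in: verified read
  return 0;
}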
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index c0bf377..f7e88cc 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -152,9 +152,10 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
}
inline void Thread::VerifyStack() {
- gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsObjectValidationEnabled()) {
- VerifyStackImpl();
+ if (kVerifyStack) {
+ if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
+ VerifyStackImpl();
+ }
}
}
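
Because kVerifyStack is a compile-time constant, hoisting the test into VerifyStack (made ALWAYS_INLINE in thread.h below) lets release builds fold the whole body away, including the runtime IsObjectValidationEnabled() query. A minimal standalone sketch of the folding (all names invented; kDebug stands in for kVerifyStack):

#include <iostream>

static constexpr bool kDebug = false;  // stand-in for kVerifyStack (kIsDebugBuild)

// Stand-ins for Heap::IsObjectValidationEnabled() and VerifyStackImpl().
bool ValidationEnabled() { return true; }
void VerifyStackImpl() { std::cout << "walking the stack\n"; }

inline void VerifyStack() {
  if (kDebug) {                 // compile-time constant: branch folds away
    if (ValidationEnabled()) {  // runtime check survives only in debug builds
      VerifyStackImpl();
    }
  }
}

int main() {
  VerifyStack();  // a no-op in this configuration
  return 0;
}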
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 16655fb..8949a5b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -66,6 +66,7 @@
#include "thread_list.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
+#include "verify_object-inl.h"
#include "vmap_table.h"
#include "well_known_classes.h"
@@ -1217,8 +1218,8 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
if (UNLIKELY(result == nullptr)) {
JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
} else {
- if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
- Runtime::Current()->GetHeap()->VerifyObject(result);
+ if (result != kInvalidIndirectRefObject) {
+ VerifyObject(result);
}
}
return result;
@@ -2000,9 +2001,7 @@ class RootCallbackVisitor {
};
void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
- if (kIsDebugBuild) {
- Runtime::Current()->GetHeap()->VerifyObject(class_loader_override);
- }
+ VerifyObject(class_loader_override);
class_loader_override_ = class_loader_override;
}
@@ -2037,11 +2036,9 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
}
}
-static void VerifyRoot(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
- RootType /*root_type*/) {
- DCHECK(root != nullptr);
- DCHECK(arg != nullptr);
- reinterpret_cast<gc::Heap*>(arg)->VerifyObject(*root);
+static void VerifyRoot(mirror::Object** root, void* /*arg*/, uint32_t /*thread_id*/,
+ RootType /*root_type*/) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ VerifyObject(*root);
}
void Thread::VerifyStackImpl() {
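
VerifyRoot now goes through the shared helper, so it no longer needs the heap passed through arg; the unused parameters remain because all root visitors share one callback signature. A standalone mock of that plumbing (types and roots invented for illustration; only the callback shape follows the hunk above):

#include <cstdint>
#include <cstdio>

// Roots are passed by pointer so a moving collector could update them;
// a verifier only inspects, so it ignores arg, thread id, and root type.
struct Object { const char* name; };
enum RootType { kRootJavaFrame, kRootNativeStack };
using RootCallback = void(Object** root, void* arg, uint32_t thread_id, RootType type);

static void VerifyRoot(Object** root, void* /*arg*/, uint32_t /*thread_id*/, RootType /*type*/) {
  std::printf("root %s looks sane\n", (*root)->name);
}

static void VisitRoots(RootCallback* visitor, void* arg) {
  static Object a{"a"}, b{"b"};
  Object* roots[] = {&a, &b};
  for (Object*& root : roots) {
    visitor(&root, arg, 0, kRootJavaFrame);
  }
}

int main() {
  VisitRoots(VerifyRoot, nullptr);
  return 0;
}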
diff --git a/runtime/thread.h b/runtime/thread.h
index 48912d1..9813130 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -375,7 +375,8 @@ class PACKED(4) Thread {
return class_loader_override_;
}
- void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override);
+ void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create the internal representation of a stack trace, that is more time
// and space efficient to compute than the StackTraceElement[]
@@ -391,7 +392,7 @@ class PACKED(4) Thread {
void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
//
// Offsets of various members of native Thread class, used by compiled code.
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
new file mode 100644
index 0000000..e211c83
--- /dev/null
+++ b/runtime/verify_object-inl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VERIFY_OBJECT_INL_H_
+#define ART_RUNTIME_VERIFY_OBJECT_INL_H_
+
+#include "verify_object.h"
+
+#include "gc/heap.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+inline void VerifyObject(mirror::Object* obj) {
+ if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
+ if (kVerifyObjectSupport > kVerifyObjectModeFast) {
+ // Slow object verification: try the heap right away.
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ } else {
+ // Fast object verification: only call the heap if our quick sanity tests fail. The heap will
+ // print the diagnostic message.
+ bool failed = !IsAligned<kObjectAlignment>(obj);
+ if (!failed) {
+ mirror::Class* c = obj->GetClass<kVerifyNone>();
+ failed = failed || c == nullptr;
+ failed = failed || !IsAligned<kObjectAlignment>(c);
+ failed = failed || !VerifyClassClass(c);
+ }
+ if (UNLIKELY(failed)) {
+ Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+ }
+ }
+ }
+}
+
+inline bool VerifyClassClass(mirror::Class* c) {
+ if (UNLIKELY(c == nullptr)) {
+ return false;
+ }
+ // Note: We pass in flags to ensure that the accessors don't call VerifyObject.
+ mirror::Class* c_c = c->GetClass<kVerifyNone>();
+ return c_c != nullptr && c_c == c_c->GetClass<kVerifyNone>();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_VERIFY_OBJECT_INL_H_
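
The fast path leans on one structural invariant: every class's class is java.lang.Class, and java.lang.Class is its own class, so following the klass pointer from any valid class reaches a fixed point. A standalone mock of the check (invented layout; the real code reads the field via GetClass<kVerifyNone>):

#include <cassert>

// Mock metaclass layout, not ART's object model.
struct Class { Class* klass; };

// Mirrors the VerifyClassClass logic above: c's class must exist and be
// its own class (the java.lang.Class fixed point).
bool VerifyClassClass(Class* c) {
  if (c == nullptr) {
    return false;
  }
  Class* c_c = c->klass;
  return c_c != nullptr && c_c == c_c->klass;
}

int main() {
  Class java_lang_class{&java_lang_class};  // its own class: the fixed point
  Class string_class{&java_lang_class};
  assert(VerifyClassClass(&string_class));
  assert(VerifyClassClass(&java_lang_class));
  Class corrupt{nullptr};
  assert(!VerifyClassClass(&corrupt));  // the kind of damage the fast path catches
  return 0;
}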
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
new file mode 100644
index 0000000..b39df4a
--- /dev/null
+++ b/runtime/verify_object.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VERIFY_OBJECT_H_
+#define ART_RUNTIME_VERIFY_OBJECT_H_
+
+#include "locks.h"
+
+#include <stdint.h>
+
+namespace art {
+
+namespace mirror {
+ class Class;
+ class Object;
+} // namespace mirror
+
+// How we want to sanity check the heap's correctness.
+enum VerifyObjectMode {
+ kVerifyObjectModeDisabled, // Heap verification is disabled.
+ kVerifyObjectModeFast, // Sanity check heap accesses quickly using VerifyClassClass.
+ kVerifyObjectModeAll // Sanity check heap accesses thoroughly.
+};
+
+enum VerifyObjectFlags {
+ kVerifyNone = 0x0,
+ // Verify self when we are doing an operation.
+ kVerifyThis = 0x1,
+ // Verify reads from objects.
+ kVerifyReads = 0x2,
+ // Verify writes to objects.
+ kVerifyWrites = 0x4,
+ // Verify all things.
+ kVerifyAll = kVerifyThis | kVerifyReads | kVerifyWrites,
+};
+
+static constexpr bool kVerifyStack = kIsDebugBuild;
+static constexpr VerifyObjectFlags kDefaultVerifyFlags = kVerifyNone;
+static constexpr VerifyObjectMode kVerifyObjectSupport =
+ kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
+
+ALWAYS_INLINE inline void VerifyObject(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+
+// Check that c.getClass() == c.getClass().getClass().
+ALWAYS_INLINE inline bool VerifyClassClass(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS;
+
+} // namespace art
+
+#endif // ART_RUNTIME_VERIFY_OBJECT_H_
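
One subtlety the comment in verify_object-inl.h hints at: because accessors can verify their own reads, the verifier must read the class pointer with kVerifyNone or it would re-enter itself through GetClass. A standalone mock of the hazard and the fix (all types invented; the mock's accessor defaults to a verified read purely to make the cycle visible, whereas kDefaultVerifyFlags above is kVerifyNone):

#include <cassert>

enum Flags { kNone = 0x0, kReads = 0x2 };

struct Object;
void VerifyObject(Object* obj);

struct Object {
  Object* klass;
  template <Flags kFlags = kReads>  // default chosen to expose the hazard
  Object* GetClass() {
    if (kFlags & kReads) {
      VerifyObject(klass);  // a verified read inside the verifier would recurse
    }
    return klass;
  }
};

void VerifyObject(Object* obj) {
  if (obj != nullptr) {
    Object* c = obj->GetClass<kNone>();  // unverified read breaks the cycle
    assert(c != nullptr);
  }
}

int main() {
  Object meta{nullptr};
  meta.klass = &meta;           // the metaclass is its own class
  Object string_class{&meta};
  VerifyObject(&string_class);  // safe: reads klass with kNone internally
  string_class.GetClass();      // an ordinary verified read at a call site
  return 0;
}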