summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2014-05-07 15:43:14 -0700
committerMathieu Chartier <mathieuc@google.com>2014-05-13 14:45:54 -0700
commiteb8167a4f4d27fce0530f6724ab8032610cd146b (patch)
treebcfeaf13ad78f2dd68466bbd0e20c71944f7e854
parent6fb66a2bc4e1c0b7931101153e58714991237af7 (diff)
downloadart-eb8167a4f4d27fce0530f6724ab8032610cd146b.zip
art-eb8167a4f4d27fce0530f6724ab8032610cd146b.tar.gz
art-eb8167a4f4d27fce0530f6724ab8032610cd146b.tar.bz2
Add Handle/HandleScope and delete SirtRef.
Delete SirtRef and replaced it with Handle. Handles are value types which wrap around StackReference*. Renamed StackIndirectReferenceTable to HandleScope. Added a scoped handle wrapper which wraps around an Object** and restores it in its destructor. Renamed Handle::get -> Get. Bug: 8473721 Change-Id: Idbfebd4f35af629f0f43931b7c5184b334822c7a
-rw-r--r--build/Android.gtest.mk4
-rw-r--r--compiler/common_compiler_test.h10
-rw-r--r--compiler/dex/mir_field_info.cc32
-rw-r--r--compiler/dex/mir_method_info.cc21
-rw-r--r--compiler/driver/compiler_driver-inl.h37
-rw-r--r--compiler/driver/compiler_driver.cc153
-rw-r--r--compiler/driver/compiler_driver.h18
-rw-r--r--compiler/driver/compiler_driver_test.cc9
-rw-r--r--compiler/elf_writer_mclinker.cc4
-rw-r--r--compiler/image_writer.cc75
-rw-r--r--compiler/jni/jni_compiler_test.cc14
-rw-r--r--compiler/jni/portable/jni_compiler.cc38
-rw-r--r--compiler/jni/quick/arm/calling_convention_arm.cc6
-rw-r--r--compiler/jni/quick/arm64/calling_convention_arm64.cc6
-rw-r--r--compiler/jni/quick/calling_convention.cc14
-rw-r--r--compiler/jni/quick/calling_convention.h33
-rw-r--r--compiler/jni/quick/jni_compiler.cc90
-rw-r--r--compiler/jni/quick/mips/calling_convention_mips.cc6
-rw-r--r--compiler/jni/quick/x86/calling_convention_x86.cc6
-rw-r--r--compiler/jni/quick/x86_64/calling_convention_x86_64.cc6
-rw-r--r--compiler/oat_test.cc3
-rw-r--r--compiler/oat_writer.cc7
-rw-r--r--compiler/utils/arm/assembler_arm.cc32
-rw-r--r--compiler/utils/arm/assembler_arm.h14
-rw-r--r--compiler/utils/arm64/assembler_arm64.cc30
-rw-r--r--compiler/utils/arm64/assembler_arm64.h14
-rw-r--r--compiler/utils/assembler.h14
-rw-r--r--compiler/utils/mips/assembler_mips.cc34
-rw-r--r--compiler/utils/mips/assembler_mips.h14
-rw-r--r--compiler/utils/x86/assembler_x86.cc22
-rw-r--r--compiler/utils/x86/assembler_x86.h14
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc24
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h14
-rw-r--r--dex2oat/dex2oat.cc2
-rw-r--r--oatdump/oatdump.cc16
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S6
-rw-r--r--runtime/arch/stub_test.cc319
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S10
-rw-r--r--runtime/catch_block_stack_visitor.cc6
-rw-r--r--runtime/catch_block_stack_visitor.h8
-rw-r--r--runtime/check_jni.cc8
-rw-r--r--runtime/class_linker-inl.h42
-rw-r--r--runtime/class_linker.cc824
-rw-r--r--runtime/class_linker.h92
-rw-r--r--runtime/class_linker_test.cc155
-rw-r--r--runtime/debugger.cc53
-rw-r--r--runtime/deoptimize_stack_visitor.cc7
-rw-r--r--runtime/dex_file.cc6
-rw-r--r--runtime/dex_file.h11
-rw-r--r--runtime/entrypoints/entrypoint_utils.h46
-rw-r--r--runtime/entrypoints/interpreter/interpreter_entrypoints.cc7
-rw-r--r--runtime/entrypoints/portable/portable_trampoline_entrypoints.cc8
-rw-r--r--runtime/entrypoints/quick/quick_jni_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc146
-rw-r--r--runtime/exception_test.cc13
-rw-r--r--runtime/fault_handler.h2
-rw-r--r--runtime/gc/collector/garbage_collector.h2
-rw-r--r--runtime/gc/collector/mark_sweep.cc4
-rw-r--r--runtime/gc/collector/mark_sweep.h2
-rw-r--r--runtime/gc/collector/semi_space-inl.h4
-rw-r--r--runtime/gc/collector/semi_space.cc8
-rw-r--r--runtime/gc/collector/semi_space.h2
-rw-r--r--runtime/gc/collector/sticky_mark_sweep.cc2
-rw-r--r--runtime/gc/heap-inl.h20
-rw-r--r--runtime/gc/heap.cc91
-rw-r--r--runtime/gc/heap.h11
-rw-r--r--runtime/gc/heap_test.cc14
-rw-r--r--runtime/gc/space/malloc_space.cc2
-rw-r--r--runtime/gc/space/space_test.h111
-rw-r--r--runtime/handle.h81
-rw-r--r--runtime/handle_scope-inl.h47
-rw-r--r--runtime/handle_scope.h193
-rw-r--r--runtime/handle_scope_test.cc (renamed from runtime/stack_indirect_reference_table_test.cc)36
-rw-r--r--runtime/indirect_reference_table-inl.h2
-rw-r--r--runtime/indirect_reference_table.cc8
-rw-r--r--runtime/indirect_reference_table.h2
-rw-r--r--runtime/intern_table_test.cc112
-rw-r--r--runtime/interpreter/interpreter.cc16
-rw-r--r--runtime/interpreter/interpreter_common.cc26
-rw-r--r--runtime/interpreter/interpreter_common.h8
-rw-r--r--runtime/jni_internal.cc75
-rw-r--r--runtime/jni_internal.h4
-rw-r--r--runtime/mirror/array.cc38
-rw-r--r--runtime/mirror/array.h6
-rw-r--r--runtime/mirror/art_field.cc2
-rw-r--r--runtime/mirror/art_method-inl.h6
-rw-r--r--runtime/mirror/art_method.cc13
-rw-r--r--runtime/mirror/art_method.h4
-rw-r--r--runtime/mirror/class.cc32
-rw-r--r--runtime/mirror/class.h2
-rw-r--r--runtime/mirror/dex_cache_test.cc9
-rw-r--r--runtime/mirror/object.cc18
-rw-r--r--runtime/mirror/object_array-inl.h15
-rw-r--r--runtime/mirror/object_test.cc156
-rw-r--r--runtime/mirror/stack_trace_element.cc18
-rw-r--r--runtime/mirror/stack_trace_element.h12
-rw-r--r--runtime/mirror/string.cc11
-rw-r--r--runtime/mirror/string.h4
-rw-r--r--runtime/monitor.cc44
-rw-r--r--runtime/monitor.h4
-rw-r--r--runtime/native/dalvik_system_DexFile.cc4
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc21
-rw-r--r--runtime/native/java_lang_Class.cc12
-rw-r--r--runtime/native/java_lang_Runtime.cc8
-rw-r--r--runtime/native/java_lang_reflect_Array.cc9
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc5
-rw-r--r--runtime/native/java_lang_reflect_Field.cc10
-rw-r--r--runtime/oat_file.h2
-rw-r--r--runtime/object_utils.h24
-rw-r--r--runtime/proxy_test.cc10
-rw-r--r--runtime/quick_exception_handler.cc11
-rw-r--r--runtime/reflection.cc7
-rw-r--r--runtime/reflection_test.cc13
-rw-r--r--runtime/runtime.cc48
-rw-r--r--runtime/scoped_thread_state_change.h2
-rw-r--r--runtime/sirt_ref-inl.h50
-rw-r--r--runtime/sirt_ref.h68
-rw-r--r--runtime/stack.cc10
-rw-r--r--runtime/stack.h8
-rw-r--r--runtime/stack_indirect_reference_table.h145
-rw-r--r--runtime/thread.cc134
-rw-r--r--runtime/thread.h45
-rw-r--r--runtime/thread_pool.h2
-rw-r--r--runtime/transaction_test.cc440
-rw-r--r--runtime/utils_test.cc11
-rw-r--r--runtime/verifier/method_verifier-inl.h6
-rw-r--r--runtime/verifier/method_verifier.cc78
-rw-r--r--runtime/verifier/method_verifier.h24
-rw-r--r--runtime/verifier/reg_type_cache.cc7
-rw-r--r--runtime/verifier/register_line.cc2
130 files changed, 2677 insertions, 2423 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c986c57..406c2a1 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -47,6 +47,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/gc/space/rosalloc_space_random_test.cc \
runtime/gc/space/large_object_space_test.cc \
runtime/gtest_test.cc \
+ runtime/handle_scope_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
runtime/instruction_set_test.cc \
@@ -62,8 +63,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/utils_test.cc \
runtime/verifier/method_verifier_test.cc \
runtime/verifier/reg_type_test.cc \
- runtime/zip_archive_test.cc \
- runtime/stack_indirect_reference_table_test.cc
+ runtime/zip_archive_test.cc
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 8f39212..586c442 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -257,7 +257,8 @@ class CommonCompilerTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -352,7 +353,8 @@ class CommonCompilerTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -372,7 +374,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
timings.EndSplit();
}
- void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileDirectMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
@@ -385,7 +387,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
CompileMethod(method);
}
- void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileVirtualMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 7c630e8..98866d9 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -21,10 +21,10 @@
#include "base/logging.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -43,11 +43,12 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -63,7 +64,7 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_);
it->flags_ = 0u | // Without kFlagIsStatic.
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
@@ -89,11 +90,12 @@ void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -110,7 +112,7 @@ void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
bool is_referrers_class, is_initialized;
std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_,
&it->storage_index_, &is_referrers_class, &is_initialized);
it->flags_ = kFlagIsStatic |
(is_volatile ? kFlagIsVolatile : 0u) |
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 2c33ef1..cc2bd95 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -19,10 +19,10 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -45,11 +45,12 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve methods and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
@@ -73,10 +74,10 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
+ soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
&target_method, devirt_target, &it->direct_code_, &it->direct_method_);
bool needs_clinit =
- compiler_driver->NeedsClassInitialization(referrer_class.get(), resolved_method);
+ compiler_driver->NeedsClassInitialization(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
~(kFlagFastPath | kFlagNeedsClassInitialization | (kInvokeTypeMask << kBitSharpTypeBegin));
it->flags_ = other_flags |
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index d9f2a3a..08fd386 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -28,7 +28,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -42,10 +42,10 @@ inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& s
}
inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
@@ -59,11 +59,11 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
}
inline mirror::ArtField* CompilerDriver::ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
*mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
@@ -165,11 +165,11 @@ inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
}
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type) {
DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK(class_loader.Get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
*mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
@@ -206,8 +206,8 @@ inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
}
inline int CompilerDriver::IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method) {
@@ -217,7 +217,7 @@ inline int CompilerDriver::IsFastInvoke(
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_class, resolved_method,
- dex_cache.get(),
+ dex_cache.Get(),
target_method->dex_method_index))) {
return 0;
}
@@ -237,7 +237,7 @@ inline int CompilerDriver::IsFastInvoke(
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
CHECK(target_method->dex_file == mUnit->GetDexFile());
- DCHECK(dex_cache.get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
@@ -259,8 +259,9 @@ inline int CompilerDriver::IsFastInvoke(
devirt_target->dex_method_index,
dex_cache, class_loader, NULL, kVirtual);
} else {
- SirtRef<mirror::DexCache> target_dex_cache(soa.Self(),
- class_linker->FindDexCache(*devirt_target->dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> target_dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
devirt_target->dex_method_index,
target_dex_cache, class_loader, NULL, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6817f14..547b9f7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -49,7 +49,7 @@
#include "mirror/throwable.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
@@ -509,7 +509,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
}
static DexToDexCompilationLevel GetDexToDexCompilationlevel(
- Thread* self, SirtRef<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
+ Thread* self, Handle<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -524,7 +524,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
// function). Since image classes can be verified again while compiling an application,
// we must prevent the DEX-to-DEX compiler from introducing them.
// TODO: find a way to enable "quick" instructions for image classes and remove this check.
- bool compiling_image_classes = class_loader.get() == nullptr;
+ bool compiling_image_classes = class_loader.Get() == nullptr;
if (compiling_image_classes) {
return kRequired;
} else if (klass->IsVerified()) {
@@ -574,8 +574,9 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings
{
ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
class_def);
}
@@ -700,8 +701,10 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
const std::string& descriptor(*it);
- SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(self, descriptor.c_str()));
- if (klass.get() == NULL) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
+ if (klass.Get() == NULL) {
VLOG(compiler) << "Failed to find class " << descriptor;
image_classes_->erase(it++);
self->ClearException();
@@ -714,8 +717,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
// exceptions are resolved by the verifier when there is a catch block in an interested method.
// Do this here so that exception classes appear to have been specified image classes.
std::set<std::pair<uint16_t, const DexFile*> > unresolved_exception_types;
- SirtRef<mirror::Class> java_lang_Throwable(self,
- class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> java_lang_Throwable(
+ hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
@@ -723,16 +727,17 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*dex_file));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
- SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
- dex_cache, class_loader));
- if (klass.get() == NULL) {
+ StackHandleScope<3> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+ Handle<mirror::Class> klass(hs.NewHandle(
+ class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, class_loader)));
+ if (klass.Get() == NULL) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
}
- DCHECK(java_lang_Throwable->IsAssignableFrom(klass.get()));
+ DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get()));
}
// Resolving exceptions may load classes that reference more exceptions, iterate until no
// more are found
@@ -816,7 +821,9 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
if (IsImage()) {
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(dex_file)));
Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -980,16 +987,17 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1017,16 +1025,17 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1168,17 +1177,18 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// Try to resolve the method and compiling method's class.
mirror::ArtMethod* resolved_method;
mirror::Class* referrer_class;
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
{
uint32_t method_idx = target_method->dex_method_index;
- SirtRef<mirror::ArtMethod> resolved_method_sirt(soa.Self(),
- ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type));
- referrer_class = (resolved_method_sirt.get() != nullptr)
+ Handle<mirror::ArtMethod> resolved_method_handle(hs.NewHandle(
+ ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type)));
+ referrer_class = (resolved_method_handle.Get() != nullptr)
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
- resolved_method = resolved_method_sirt.get();
+ resolved_method = resolved_method_handle.Get();
}
bool result = false;
if (resolved_method != nullptr) {
@@ -1196,7 +1206,7 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
if (UNLIKELY(referrer_class == nullptr) ||
UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
- resolved_method, dex_cache.get(),
+ resolved_method, dex_cache.Get(),
target_method->dex_method_index)) ||
*invoke_type == kSuper) {
// Slow path. (Without devirtualization, all super calls go slow path as well.)
@@ -1469,8 +1479,10 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
if (!SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
ScopedObjectAccess soa(self);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -1556,9 +1568,10 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager->GetClassLinker();
const DexFile& dex_file = *manager->GetDexFile();
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == NULL) {
@@ -1611,11 +1624,12 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(), class_linker->FindClass(soa.Self(), descriptor,
- class_loader));
- if (klass.get() == nullptr) {
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1624,7 +1638,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
&error_msg) ==
@@ -1632,8 +1646,8 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
}
- } else if (!SkipClass(jclass_loader, dex_file, klass.get())) {
- CHECK(klass->IsResolved()) << PrettyClass(klass.get());
+ } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
class_linker->VerifyClass(klass);
if (klass->IsErroneous()) {
@@ -1643,7 +1657,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
}
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
- << PrettyDescriptor(klass.get()) << ": state=" << klass->GetStatus();
+ << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
}
soa.Self()->AssertNoPendingException();
}
@@ -1666,13 +1680,13 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(),
- manager->GetClassLinker()->FindClass(soa.Self(), descriptor,
- class_loader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.get())) {
+ if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
// Only try to initialize classes that were successfully verified.
if (klass->IsVerified()) {
// Attempt to initialize the class but bail if we either need to initialize the super-class
@@ -1687,8 +1701,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
// parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
// We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
// than use a special Object for the purpose we use the Class of java.lang.Class.
- SirtRef<mirror::Class> sirt_klass(soa.Self(), klass->GetClass());
- ObjectLock<mirror::Class> lock(soa.Self(), &sirt_klass);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), &h_klass);
// Attempt to initialize allowing initialization of parent classes but still not static
// fields.
manager->GetClassLinker()->EnsureInitialized(klass, false, true);
@@ -1803,8 +1817,9 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(soa.Self(), class_loader, dex_file,
class_def);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 6ac9cf7..f3db41f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -52,7 +52,7 @@ struct InlineIGetIPutData;
class OatWriter;
class ParallelCompilationManager;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -221,15 +221,15 @@ class CompilerDriver {
// Resolve compiling method's class. Returns nullptr on failure.
mirror::Class* ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field. Returns nullptr on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
mirror::ArtField* ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -258,8 +258,8 @@ class CompilerDriver {
// Resolve a method. Returns nullptr on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -277,8 +277,8 @@ class CompilerDriver {
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
int IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 86034c8..113594a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -30,7 +30,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -80,7 +80,9 @@ class CompilerDriverTest : public CommonCompilerTest {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != NULL);
for (size_t i = 0; i < c->NumDirectMethods(); i++) {
@@ -150,7 +152,8 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> null_loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
CompileVirtualMethod(null_loader, "java.lang.Class", "isFinalizable", "()Z");
CompileDirectMethod(null_loader, "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index f688103..eb9b230 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -361,8 +361,8 @@ void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>&
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ Handle<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
+ Handle<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
method = linker->ResolveMethod(dex_file, method_idx, dex_cache, class_loader, NULL, invoke_type);
CHECK(method != NULL);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 62817e7..d855eee 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -51,7 +51,7 @@
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "UniquePtr.h"
#include "utils.h"
@@ -382,16 +382,14 @@ void ImageWriter::CalculateObjectOffsets(Object* obj) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- Thread* self = Thread::Current();
- SirtRef<Object> sirt_obj(self, obj);
- mirror::String* interned = obj->AsString()->Intern();
- if (sirt_obj.get() != interned) {
+ mirror::String* const interned = obj->AsString()->Intern();
+ if (obj != interned) {
if (!IsImageOffsetAssigned(interned)) {
// interned obj is after us, allocate its location early
AssignImageOffset(interned);
}
// point those looking for this object to the interned version.
- SetImageOffset(sirt_obj.get(), GetImageOffset(interned));
+ SetImageOffset(obj, GetImageOffset(interned));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
@@ -404,20 +402,22 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<Class> object_array_class(self, class_linker->FindSystemClass(self,
- "[Ljava/lang/Object;"));
+ StackHandleScope<3> hs(self);
+ Handle<Class> object_array_class(hs.NewHandle(
+ class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
// build an Object[] of all the DexCaches used in the source_space_
- ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class.get(),
- class_linker->GetDexCaches().size());
+ Handle<ObjectArray<Object>> dex_caches(
+ hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
+ class_linker->GetDexCaches().size())));
int i = 0;
for (DexCache* dex_cache : class_linker->GetDexCaches()) {
dex_caches->Set<false>(i++, dex_cache);
}
// build an Object[] of the roots needed to restore the runtime
- SirtRef<ObjectArray<Object> > image_roots(
- self, ObjectArray<Object>::Alloc(self, object_array_class.get(), ImageHeader::kImageRootsMax));
+ Handle<ObjectArray<Object> > image_roots(hs.NewHandle(
+ ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
@@ -427,27 +427,28 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
- image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches);
+ image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
CHECK(image_roots->Get(i) != NULL);
}
- return image_roots.get();
+ return image_roots.Get();
}
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
// Visit fields of parent classes first.
- SirtRef<mirror::Class> sirt_class(Thread::Current(), klass);
- mirror::Class* super = sirt_class->GetSuperClass();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ mirror::Class* super = h_class->GetSuperClass();
if (super != nullptr) {
WalkInstanceFields(obj, super);
}
//
- size_t num_reference_fields = sirt_class->NumReferenceInstanceFields();
+ size_t num_reference_fields = h_class->NumReferenceInstanceFields();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = sirt_class->GetInstanceField(i);
+ mirror::ArtField* field = h_class->GetInstanceField(i);
MemberOffset field_offset = field->GetOffset();
mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
@@ -460,28 +461,28 @@ void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
if (!IsImageOffsetAssigned(obj)) {
// Walk instance fields of all objects
- Thread* self = Thread::Current();
- SirtRef<mirror::Object> sirt_obj(self, obj);
- SirtRef<mirror::Class> klass(self, obj->GetClass());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
+ Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
// visit the object itself.
- CalculateObjectOffsets(sirt_obj.get());
- WalkInstanceFields(sirt_obj.get(), klass.get());
+ CalculateObjectOffsets(h_obj.Get());
+ WalkInstanceFields(h_obj.Get(), klass.Get());
// Walk static fields of a Class.
- if (sirt_obj->IsClass()) {
+ if (h_obj->IsClass()) {
size_t num_static_fields = klass->NumReferenceStaticFields();
for (size_t i = 0; i < num_static_fields; ++i) {
mirror::ArtField* field = klass->GetStaticField(i);
MemberOffset field_offset = field->GetOffset();
- mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object>(field_offset);
+ mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
}
- } else if (sirt_obj->IsObjectArray()) {
+ } else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
- int32_t length = sirt_obj->AsObjectArray<mirror::Object>()->GetLength();
+ int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
for (int32_t i = 0; i < length; i++) {
- mirror::ObjectArray<mirror::Object>* obj_array = sirt_obj->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
mirror::Object* value = obj_array->Get(i);
if (value != nullptr) {
WalkFieldsInOrder(value);
@@ -500,7 +501,8 @@ void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
Thread* self = Thread::Current();
- SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
+ StackHandleScope<1> hs(self);
+ Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK_EQ(0U, image_end_);
@@ -533,7 +535,7 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
static_cast<uint32_t>(image_end_),
RoundUp(image_end_, kPageSize),
RoundUp(bitmap_bytes, kPageSize),
- PointerToLowMemUInt32(GetImageAddress(image_roots.get())),
+ PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin),
PointerToLowMemUInt32(oat_data_begin_),
@@ -691,9 +693,10 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*patch->GetTargetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
patch->GetTargetMethodIdx(),
dex_cache,
@@ -714,9 +717,9 @@ static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* pa
static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
Class* klass = class_linker->ResolveType(patch->GetDexFile(),
patch->GetTargetTypeIdx(),
dex_cache,
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 6b5e55e..6035689 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -48,7 +48,9 @@ class JniCompilerTest : public CommonCompilerTest {
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
// Compile the native method before starting the runtime
mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
mirror::ArtMethod* method;
@@ -153,8 +155,9 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
@@ -169,8 +172,9 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 0c14346..d2f54f8 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -98,7 +98,7 @@ CompiledMethod* JniCompiler::Compile() {
arg_begin = arg_iter;
// Count the number of Object* arguments
- uint32_t sirt_size = 1;
+ uint32_t handle_scope_size = 1;
// "this" object pointer for non-static
// "class" object pointer for static
for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
@@ -106,12 +106,12 @@ CompiledMethod* JniCompiler::Compile() {
arg_iter->setName(StringPrintf("a%u", i));
#endif
if (arg_iter->getType() == irb_.getJObjectTy()) {
- ++sirt_size;
+ ++handle_scope_size;
}
}
// Shadow stack
- ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(sirt_size);
+ ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(handle_scope_size);
::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
// Store the dex pc
@@ -123,7 +123,7 @@ CompiledMethod* JniCompiler::Compile() {
// Push the shadow frame
::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
::llvm::Value* old_shadow_frame =
- irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, sirt_size);
+ irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, handle_scope_size);
// Get JNIEnv
::llvm::Value* jni_env_object_addr =
@@ -148,35 +148,35 @@ CompiledMethod* JniCompiler::Compile() {
// Variables for GetElementPtr
::llvm::Value* gep_index[] = {
irb_.getInt32(0), // No displacement for shadow frame pointer
- irb_.getInt32(1), // SIRT
+ irb_.getInt32(1), // handle scope
NULL,
};
- size_t sirt_member_index = 0;
+ size_t handle_scope_member_index = 0;
- // Store the "this object or class object" to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the "this object or class object" to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(this_object_or_class_object, sirt_field_addr, kTBAAShadowFrame);
+ irb_.CreateStore(this_object_or_class_object, handle_scope_field_addr, kTBAAShadowFrame);
// Push the "this object or class object" to out args
- this_object_or_class_object = irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy());
+ this_object_or_class_object = irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy());
args.push_back(this_object_or_class_object);
- // Store arguments to SIRT, and push back to args
+ // Store arguments to handle scope, and push back to args
for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
if (arg_iter->getType() == irb_.getJObjectTy()) {
- // Store the reference type arguments to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the reference type arguments to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(arg_iter, sirt_field_addr, kTBAAShadowFrame);
- // Note null is placed in the SIRT but the jobject passed to the native code must be null
- // (not a pointer into the SIRT as with regular references).
+ irb_.CreateStore(arg_iter, handle_scope_field_addr, kTBAAShadowFrame);
+ // Note null is placed in the handle scope but the jobject passed to the native code must be null
+ // (not a pointer into the handle scope as with regular references).
::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
::llvm::Value* arg =
irb_.CreateSelect(equal_null,
irb_.getJNull(),
- irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy()));
+ irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy()));
args.push_back(arg);
} else {
args.push_back(arg_iter);
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index ae18d2e..649a80f 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -144,10 +144,10 @@ ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t ArmJniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 6212a23..ffd27ee 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -197,10 +197,10 @@ ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
size_t Arm64JniCallingConvention::FrameSize() {
// Method*, callee save area size, local reference segment state
size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t Arm64JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index a99a4c2..95c2d40 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -126,8 +126,8 @@ size_t JniCallingConvention::ReferenceCount() const {
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t references_size = sirt_pointer_size_ * ReferenceCount(); // size excluding header
- return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
+ size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header
+ return FrameOffset(HandleScopeReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -219,13 +219,13 @@ bool JniCallingConvention::IsCurrentParamALong() {
}
}
-// Return position of SIRT entry holding reference at the current iterator
+// Return position of handle scope entry holding reference at the current iterator
// position
-FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
CHECK(IsCurrentParamAReference());
- CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- int result = SirtReferencesOffset().Int32Value() + itr_refs_ * sirt_pointer_size_;
- CHECK_GT(result, SirtNumRefsOffset().Int32Value());
+ CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
+ int result = HandleScopeReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
+ CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
return FrameOffset(result);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 18afd58..2a6e7d9 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include <vector>
-#include "stack_indirect_reference_table.h"
+#include "handle_scope.h"
#include "thread.h"
#include "utils/managed_register.h"
@@ -73,7 +73,7 @@ class CallingConvention {
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
- sirt_pointer_size_(sizeof(StackReference<mirror::Object>)),
+ handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
@@ -197,8 +197,8 @@ class CallingConvention {
FrameOffset displacement_;
// The size of a reference.
const size_t frame_pointer_size_;
- // The size of a reference entry within the SIRT.
- const size_t sirt_pointer_size_;
+ // The size of a reference entry within the handle scope.
+ const size_t handle_scope_pointer_size_;
private:
const bool is_static_;
@@ -315,26 +315,25 @@ class JniCallingConvention : public CallingConvention {
virtual FrameOffset CurrentParamStackOffset() = 0;
// Iterator interface extension for JNI
- FrameOffset CurrentParamSirtEntryOffset();
+ FrameOffset CurrentParamHandleScopeEntryOffset();
- // Position of SIRT and interior fields
- FrameOffset SirtOffset() const {
+ // Position of handle scope and interior fields
+ FrameOffset HandleScopeOffset() const {
return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_); // above Method*
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset(frame_pointer_size_));
+ FrameOffset HandleScopeLinkOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::LinkOffset(frame_pointer_size_));
}
- FrameOffset SirtNumRefsOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::NumberOfReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleScopeNumRefsOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
}
- FrameOffset SirtReferencesOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::ReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleScopeReferencesOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::ReferencesOffset(frame_pointer_size_));
}
virtual ~JniCallingConvention() {}
@@ -350,7 +349,7 @@ class JniCallingConvention : public CallingConvention {
size_t frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
- // Number of stack slots for outgoing arguments, above which the SIRT is
+ // Number of stack slots for outgoing arguments, above which the handle scope is
// located
virtual size_t NumberOfOutgoingStackArgs() = 0;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 5a22170..20f9f4b 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -103,54 +103,54 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
__ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
- // 2. Set up the StackIndirectReferenceTable
+ // 2. Set up the HandleScope
mr_conv->ResetIterator(FrameOffset(frame_size));
main_jni_conv->ResetIterator(FrameOffset(0));
- __ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
+ __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<8>(),
+ __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<8>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopSirtOffset<8>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<4>(),
+ __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<4>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopSirtOffset<4>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
- // 3. Place incoming reference arguments into SIRT
+ // 3. Place incoming reference arguments into handle scope
main_jni_conv->Next(); // Skip JNIEnv*
// 3.5. Create Class argument for static methods out of passed method
if (is_static) {
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check sirt offset is within frame
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset());
__ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
- __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister());
- main_jni_conv->Next(); // in SIRT so move to next argument
+ __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->Next(); // in handle scope so move to next argument
}
while (mr_conv->HasNext()) {
CHECK(main_jni_conv->HasNext());
bool ref_param = main_jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
- // References need placing in SIRT and the entry value passing
+ // References need placing in handle scope and the entry value passing
if (ref_param) {
- // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+ // Compute handle scope entry, note null is placed in the handle scope but its boxed value
// must be NULL
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame and doesn't run into the saved segment state
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
- CHECK_NE(sirt_offset.Uint32Value(),
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame and doesn't run into the saved segment state
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+ CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool input_on_stack = mr_conv->IsCurrentParamOnStack();
@@ -159,11 +159,11 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
if (input_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
__ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
- __ StoreRef(sirt_offset, in_reg);
+ __ StoreRef(handle_scope_offset, in_reg);
} else if (input_on_stack) {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
__ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
- __ CopyRef(sirt_offset, in_off,
+ __ CopyRef(handle_scope_offset, in_off,
mr_conv->InterproceduralScratchRegister());
}
}
@@ -197,20 +197,20 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
: QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- FrameOffset locked_object_sirt_offset(0);
+ FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
// Pass object for locking.
main_jni_conv->Next(); // Skip JNIEnv.
- locked_object_sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
main_jni_conv->Next();
@@ -274,15 +274,15 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv*
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
}
@@ -369,12 +369,12 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// Pass object for unlocking.
if (end_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
end_jni_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
end_jni_conv->Next();
@@ -438,7 +438,7 @@ static void CopyParameter(Assembler* jni_asm,
size_t frame_size, size_t out_arg_size) {
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool output_in_reg = jni_conv->IsCurrentParamInRegister();
- FrameOffset sirt_offset(0);
+ FrameOffset handle_scope_offset(0);
bool null_allowed = false;
bool ref_param = jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
@@ -449,21 +449,21 @@ static void CopyParameter(Assembler* jni_asm,
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in SIRT and the entry address passing
+ // References need placing in handle scope and the entry address passing
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
- // Compute SIRT offset. Note null is placed in the SIRT but the jobject
- // passed to the native code must be null (not a pointer into the SIRT
+ // Compute handle scope offset. Note null is placed in the handle scope but the jobject
+ // passed to the native code must be null (not a pointer into the handle scope
// as with regular references).
- sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame.
- CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+ handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame.
+ CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
}
if (input_in_reg && output_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
ManagedRegister out_reg = jni_conv->CurrentParamRegister();
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
} else {
if (!mr_conv->IsCurrentParamOnStack()) {
// regular non-straddling move
@@ -475,7 +475,7 @@ static void CopyParameter(Assembler* jni_asm,
} else if (!input_in_reg && !output_in_reg) {
FrameOffset out_off = jni_conv->CurrentParamStackOffset();
if (ref_param) {
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
@@ -489,7 +489,7 @@ static void CopyParameter(Assembler* jni_asm,
// Check that incoming stack arguments are above the current stack frame.
CHECK_GT(in_off.Uint32Value(), frame_size);
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -502,8 +502,8 @@ static void CopyParameter(Assembler* jni_asm,
// Check outgoing argument is within frame
CHECK_LT(out_off.Uint32Value(), frame_size);
if (ref_param) {
- // TODO: recycle value in in_reg rather than reload from SIRT
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ // TODO: recycle value in in_reg rather than reload from handle scope
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 8e1c0c7..0402fe6 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -148,10 +148,10 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
size_t MipsJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t MipsJniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 153f953..97b4cdf 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -125,10 +125,10 @@ uint32_t X86JniCallingConvention::CoreSpillMask() const {
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 52490e6..4871c87 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -140,10 +140,10 @@ uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86_64JniCallingConvention::OutArgSize() {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 66972cb..558ff1f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -153,7 +153,8 @@ TEST_F(OatTest, WriteRead) {
num_virtual_methods = it.NumVirtualMethods();
}
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- SirtRef<mirror::ClassLoader> loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, loader);
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 39311d9..bace25c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -33,7 +33,7 @@
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -511,8 +511,9 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
InvokeType invoke_type = it.GetMethodInvokeType(dex_file_->GetClassDef(class_def_index_));
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(*dex_file_));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
class_loader, nullptr, invoke_type);
CHECK(method != NULL);
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 5c839dd..64685c1 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1752,53 +1752,53 @@ void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
#endif
}
-void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
}
-void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f5be04a..396e603 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -521,20 +521,20 @@ class ArmAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**) load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f486b3c..27188b2 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -539,52 +539,52 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
+void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
ManagedRegister m_in_reg, bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale sirt entries in x registers.
+ // For now we only hold stale handle scope entries in x registers.
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
- sirt_offs.Int32Value());
+ handle_scope_offs.Int32Value());
in_reg = out_reg;
}
___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
}
}
-void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
// Move this logic in add constants with flags.
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
+void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 583150c..c866b29 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -161,20 +161,20 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst.
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**) load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 219c87f..19239e1 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -453,20 +453,20 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**) load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 9001f8a..8001dcd 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -827,8 +827,8 @@ void MipsAssembler::MemoryBarrier(ManagedRegister) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
-void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
@@ -836,27 +836,27 @@ void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
Label null_arg;
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0);
}
EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
-void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -864,21 +864,21 @@ void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
if (null_allowed) {
Label null_arg;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-// Given a SIRT entry, load the associated reference.
-void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 75ee8b9..216cb41 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,20 +238,20 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister mscratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, ManagedRegister mscratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**), load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 6a3efc5..0791c63 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1727,8 +1727,8 @@ void X86Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
@@ -1742,34 +1742,34 @@ void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
}
-void X86Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
Store(out_off, scratch, 4);
}
-// Given a SIRT entry, load the associated reference.
-void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 057c80a..2fc6049 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -541,20 +541,20 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**), load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 8eaeae1..0ede875 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1989,8 +1989,8 @@ void X86_64Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
@@ -1998,7 +1998,7 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
// Use out_reg as indicator of NULL
in_reg = out_reg;
// TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
CHECK(in_reg.IsCpuRegister());
CHECK(out_reg.IsCpuRegister());
@@ -2010,34 +2010,34 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
}
-void X86_64Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
Store(out_off, scratch, 8);
}
-// Given a SIRT entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 87fb359..548d379 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -566,20 +566,20 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handle_scope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**), load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src);
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e0bfc6b..cea86ae 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -321,7 +321,7 @@ class Dex2Oat {
TimingLogger& timings,
CumulativeLogger& compiler_phases_timings,
std::string profile_file) {
- // SirtRef and ClassLoader creation needs to come after Runtime::Create
+ // Handle and ClassLoader creation needs to come after Runtime::Create
jobject class_loader = NULL;
Thread* self = Thread::Current();
if (!boot_image_option.empty()) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index fc60c02..fef25e0 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -417,9 +417,10 @@ class OatDumper {
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(
- soa.Self(), runtime->GetClassLinker()->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def,
code_item, dex_method_idx, nullptr, method_access_flags,
true, true);
@@ -687,11 +688,12 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(
- soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
- class_loader, &class_def, code_item, NULL,
+ class_loader, &class_def, code_item, nullptr,
method_access_flags);
}
}
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index c056b2f..2a21144 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1444,7 +1444,7 @@ END art_quick_resolution_trampoline
* | RDI/Method* | <- X0
* #-------------------#
* | local ref cookie | // 4B
- * | SIRT size | // 4B
+ * | handle scope size | // 4B
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
@@ -1471,7 +1471,7 @@ ENTRY art_quick_generic_jni_trampoline
.cfi_def_cfa_register x28
// This looks the same, but is different: this will be updated to point to the bottom
- // of the frame when the SIRT is inserted.
+ // of the frame when the handle scope is inserted.
mov xFP, sp
mov x8, #5120
@@ -1486,7 +1486,7 @@ ENTRY art_quick_generic_jni_trampoline
mov x1, xFP
bl artQuickGenericJniTrampoline // (Thread*, sp)
- // Get the updated pointer. This is the bottom of the frame _with_ SIRT.
+ // Get the updated pointer. This is the bottom of the frame _with_ handle scope.
ldr xFP, [sp]
add x9, sp, #8
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 6a2bfb5..d9bc105 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -371,13 +371,14 @@ TEST_F(StubTest, LockObject) {
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::String> obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
LockWord lock = obj->GetLockWord(false);
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after = obj->GetLockWord(false);
@@ -386,7 +387,7 @@ TEST_F(StubTest, LockObject) {
EXPECT_EQ(lock_after.ThinLockCount(), 0U); // Thin lock starts count at zero
for (size_t i = 1; i < kThinLockLoops; ++i) {
- Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
// Check we're at lock count i
@@ -398,12 +399,12 @@ TEST_F(StubTest, LockObject) {
}
// Force a fat lock by running identity hashcode to fill up lock word.
- SirtRef<mirror::Object> obj2(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- "hello, world!"));
+ Handle<mirror::String> obj2(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
obj2->IdentityHashCode();
- Invoke3(reinterpret_cast<size_t>(obj2.get()), 0U, 0U,
+ Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after2 = obj2->GetLockWord(false);
@@ -447,16 +448,16 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
-
- SirtRef<mirror::String> obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ static constexpr size_t kNumberOfLocks = 10; // Number of objects = number of locks
+ StackHandleScope<kNumberOfLocks + 1> hs(self);
+ Handle<mirror::String> obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
LockWord lock = obj->GetLockWord(false);
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
-
// This should be an illegal monitor state.
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -465,14 +466,14 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
LockWord::LockState new_state = lock_after.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
LockWord lock_after2 = obj->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
- test->Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
LockWord lock_after3 = obj->GetLockWord(false);
@@ -485,20 +486,18 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
RandGen r(0x1234);
- constexpr size_t kNumberOfLocks = 10; // Number of objects = lock
constexpr size_t kIterations = 10000; // Number of iterations
constexpr size_t kMoveToFat = 1000; // Chance of 1:kMoveFat to make a lock fat.
size_t counts[kNumberOfLocks];
bool fat[kNumberOfLocks]; // Whether a lock should be thin or fat.
- SirtRef<mirror::String>* objects[kNumberOfLocks];
+ Handle<mirror::String> objects[kNumberOfLocks];
// Initialize = allocate.
for (size_t i = 0; i < kNumberOfLocks; ++i) {
counts[i] = 0;
fat[i] = false;
- objects[i] = new SirtRef<mirror::String>(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
+ objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
}
for (size_t i = 0; i < kIterations; ++i) {
@@ -508,9 +507,9 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
// Make lock fat?
if (!fat[index] && (r.next() % kMoveToFat == 0)) {
fat[index] = true;
- objects[index]->get()->IdentityHashCode();
+ objects[index]->IdentityHashCode();
- LockWord lock_iter = objects[index]->get()->GetLockWord(false);
+ LockWord lock_iter = objects[index]->GetLockWord(false);
LockWord::LockState iter_state = lock_iter.GetState();
if (counts[index] == 0) {
EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
@@ -529,11 +528,11 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
}
if (lock) {
- test-> Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
counts[index]++;
} else {
- test->Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
counts[index]--;
}
@@ -541,12 +540,12 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
EXPECT_FALSE(self->IsExceptionPending());
// Check the new state.
- LockWord lock_iter = objects[index]->get()->GetLockWord(true);
+ LockWord lock_iter = objects[index]->GetLockWord(true);
LockWord::LockState iter_state = lock_iter.GetState();
if (fat[index]) {
// Abuse MonitorInfo.
EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
- MonitorInfo info(objects[index]->get());
+ MonitorInfo info(objects[index].Get());
EXPECT_EQ(counts[index], info.entry_count_) << index;
} else {
if (counts[index] > 0) {
@@ -560,23 +559,20 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
}
// Unlock the remaining count times and then check it's unlocked. Then deallocate.
- // Go reverse order to correctly handle SirtRefs.
+ // Go reverse order to correctly handle Handles.
for (size_t i = 0; i < kNumberOfLocks; ++i) {
size_t index = kNumberOfLocks - 1 - i;
size_t count = counts[index];
while (count > 0) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index]->get()), 0U, 0U,
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
-
count--;
}
- LockWord lock_after4 = objects[index]->get()->GetLockWord(false);
+ LockWord lock_after4 = objects[index]->GetLockWord(false);
LockWord::LockState new_state4 = lock_after4.GetState();
EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
|| LockWord::LockState::kFatLocked == new_state4);
-
- delete objects[index];
}
// Test done.
@@ -602,31 +598,32 @@ TEST_F(StubTest, CheckCast) {
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/String;"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+ Handle<mirror::Class> c2(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_FALSE(self->IsExceptionPending());
// TODO: Make the following work. But that would require correct managed frames.
- Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -654,23 +651,22 @@ TEST_F(StubTest, APutObj) {
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/String;"));
- SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/String;"));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ Handle<mirror::Class> ca(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
// Build a string array of size 1
- SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 10));
+ Handle<mirror::ObjectArray<mirror::Object>> array(
+ hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), 10)));
// Build a string -> should be assignable
- SirtRef<mirror::Object> str_obj(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ Handle<mirror::String> str_obj(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
// Build a generic object -> should fail assigning
- SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));
+ Handle<mirror::Object> obj_obj(hs.NewHandle(c->AllocObject(soa.Self())));
// Play with it...
@@ -679,51 +675,51 @@ TEST_F(StubTest, APutObj) {
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(0));
+ EXPECT_EQ(str_obj.Get(), array->Get(0));
- Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(1));
+ EXPECT_EQ(str_obj.Get(), array->Get(1));
- Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(2));
+ EXPECT_EQ(str_obj.Get(), array->Get(2));
- Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
- EXPECT_EQ(str_obj.get(), array->Get(3));
+ EXPECT_EQ(str_obj.Get(), array->Get(3));
// 1.2) Assign null to array[0..3]
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(0));
- Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(1));
- Invoke3(reinterpret_cast<size_t>(array.get()), 2U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(2));
- Invoke3(reinterpret_cast<size_t>(array.get()), 3U, reinterpret_cast<size_t>(nullptr),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -735,7 +731,7 @@ TEST_F(StubTest, APutObj) {
// 2.1) Array = null
// TODO: Throwing NPE needs actual DEX code
-// Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
+// Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.Get()),
// reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
//
// EXPECT_TRUE(self->IsExceptionPending());
@@ -743,8 +739,8 @@ TEST_F(StubTest, APutObj) {
// 2.2) Index < 0
- Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
- reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
+ reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -752,7 +748,7 @@ TEST_F(StubTest, APutObj) {
// 2.3) Index > 0
- Invoke3(reinterpret_cast<size_t>(array.get()), 10U, reinterpret_cast<size_t>(str_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -760,7 +756,7 @@ TEST_F(StubTest, APutObj) {
// 3) Failure cases (obj into str[])
- Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
+ Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -785,8 +781,9 @@ TEST_F(StubTest, AllocObject) {
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
// Play with it...
@@ -802,35 +799,35 @@ TEST_F(StubTest, AllocObject) {
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectResolved),
self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
}
@@ -841,19 +838,21 @@ TEST_F(StubTest, AllocObject) {
Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
// Array helps to fill memory faster.
- SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- std::vector<SirtRef<mirror::Object>*> sirt_refs;
+ Handle<mirror::Class> ca(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+
+ // Use arbitrary large amount for now.
+ static const size_t kMaxHandles = 1000000;
+ UniquePtr<StackHandleScope<kMaxHandles> > hsp(new StackHandleScope<kMaxHandles>(self));
+
+ std::vector<Handle<mirror::Object>> handles;
// Start allocating with 128K
size_t length = 128 * KB / 4;
while (length > 10) {
- SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
- ca.get(),
- length/4));
- if (self->IsExceptionPending() || ref->get() == nullptr) {
+ Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
+ if (self->IsExceptionPending() || h.Get() == nullptr) {
self->ClearException();
- delete ref;
// Try a smaller length
length = length / 8;
@@ -863,38 +862,26 @@ TEST_F(StubTest, AllocObject) {
length = mem / 8;
}
} else {
- sirt_refs.push_back(ref);
+ handles.push_back(h);
}
}
- LOG(INFO) << "Used " << sirt_refs.size() << " arrays to fill space.";
+ LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";
// Allocate simple objects till it fails.
while (!self->IsExceptionPending()) {
- SirtRef<mirror::Object>* ref = new SirtRef<mirror::Object>(soa.Self(),
- c->AllocObject(soa.Self()));
- if (!self->IsExceptionPending() && ref->get() != nullptr) {
- sirt_refs.push_back(ref);
- } else {
- delete ref;
+ Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
+ if (!self->IsExceptionPending() && h.Get() != nullptr) {
+ handles.push_back(h);
}
}
self->ClearException();
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
self);
-
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
-
- // Release all the allocated objects.
- // Need to go backward to release SirtRef in the right order.
- auto it = sirt_refs.rbegin();
- auto end = sirt_refs.rend();
- for (; it != end; ++it) {
- delete *it;
- }
}
// Tests done.
@@ -916,12 +903,13 @@ TEST_F(StubTest, AllocObjectArray) {
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
// Needed to have a linked method.
- SirtRef<mirror::Class> c_obj(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
+ Handle<mirror::Class> c_obj(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
// Play with it...
@@ -941,7 +929,7 @@ TEST_F(StubTest, AllocObjectArray) {
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
EXPECT_EQ(obj->GetLength(), 10);
}
@@ -949,16 +937,15 @@ TEST_F(StubTest, AllocObjectArray) {
{
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr), 10U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 10U,
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
self);
-
EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
EXPECT_TRUE(obj->IsArrayInstance());
EXPECT_TRUE(obj->IsObjectArray());
- EXPECT_EQ(c.get(), obj->GetClass());
+ EXPECT_EQ(c.Get(), obj->GetClass());
VerifyObject(obj);
mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
EXPECT_EQ(array->GetLength(), 10);
@@ -968,7 +955,7 @@ TEST_F(StubTest, AllocObjectArray) {
// Out-of-memory.
{
- size_t result = Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(nullptr),
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr),
GB, // that should fail...
reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
self);
@@ -1005,32 +992,31 @@ TEST_F(StubTest, StringCompareTo) {
// Use array so we can index into it and use a matrix for expected results
// Setup: The first half is standard. The second half uses a non-zero offset.
// TODO: Shared backing arrays.
- constexpr size_t base_string_count = 7;
- const char* c[base_string_count] = { "", "", "a", "aa", "ab", "aac", "aac" , };
+ static constexpr size_t kBaseStringCount = 7;
+ const char* c[kBaseStringCount] = { "", "", "a", "aa", "ab", "aac", "aac" , };
- constexpr size_t string_count = 2 * base_string_count;
+ static constexpr size_t kStringCount = 2 * kBaseStringCount;
- SirtRef<mirror::String>* s[string_count];
+ StackHandleScope<kStringCount> hs(self);
+ Handle<mirror::String> s[kStringCount];
- for (size_t i = 0; i < base_string_count; ++i) {
- s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- c[i]));
+ for (size_t i = 0; i < kBaseStringCount; ++i) {
+ s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
}
RandGen r(0x1234);
- for (size_t i = base_string_count; i < string_count; ++i) {
- s[i] = new SirtRef<mirror::String>(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
- c[i - base_string_count]));
- int32_t length = s[i]->get()->GetLength();
+ for (size_t i = kBaseStringCount; i < kStringCount; ++i) {
+ s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - kBaseStringCount]));
+ int32_t length = s[i]->GetLength();
if (length > 1) {
// Set a random offset and length.
int32_t new_offset = 1 + (r.next() % (length - 1));
int32_t rest = length - new_offset - 1;
int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);
- s[i]->get()->SetField32<false>(mirror::String::CountOffset(), new_length);
- s[i]->get()->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
+ s[i]->SetField32<false>(mirror::String::CountOffset(), new_length);
+ s[i]->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
}
}
@@ -1039,20 +1025,20 @@ TEST_F(StubTest, StringCompareTo) {
// Matrix of expectations. First component is first parameter. Note we only check against the
// sign, not the value. As we are testing random offsets, we need to compute this and need to
// rely on String::CompareTo being correct.
- int32_t expected[string_count][string_count];
- for (size_t x = 0; x < string_count; ++x) {
- for (size_t y = 0; y < string_count; ++y) {
- expected[x][y] = s[x]->get()->CompareTo(s[y]->get());
+ int32_t expected[kStringCount][kStringCount];
+ for (size_t x = 0; x < kStringCount; ++x) {
+ for (size_t y = 0; y < kStringCount; ++y) {
+ expected[x][y] = s[x]->CompareTo(s[y].Get());
}
}
// Play with it...
- for (size_t x = 0; x < string_count; ++x) {
- for (size_t y = 0; y < string_count; ++y) {
+ for (size_t x = 0; x < kStringCount; ++x) {
+ for (size_t y = 0; y < kStringCount; ++y) {
// Test string_compareto x y
- size_t result = Invoke3(reinterpret_cast<size_t>(s[x]->get()),
- reinterpret_cast<size_t>(s[y]->get()), 0U,
+ size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
+ reinterpret_cast<size_t>(s[y].Get()), 0U,
reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1090,7 +1076,7 @@ extern "C" void art_quick_set32_static(void);
extern "C" void art_quick_get32_static(void);
#endif
-static void GetSet32Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1126,7 +1112,7 @@ extern "C" void art_quick_set32_instance(void);
extern "C" void art_quick_get32_instance(void);
#endif
-static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1135,20 +1121,20 @@ static void GetSet32Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtFi
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
self,
referrer);
- int32_t res = f->get()->GetInt(obj->get());
+ int32_t res = f->Get()->GetInt(obj->Get());
EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
res++;
- f->get()->SetInt<false>(obj->get(), res);
+ f->Get()->SetInt<false>(obj->Get(), res);
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
0U,
reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
self,
@@ -1187,7 +1173,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
}
#endif
-static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
@@ -1210,7 +1196,7 @@ static void GetSetObjStatic(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtFie
extern "C" void art_quick_set_obj_instance(void);
extern "C" void art_quick_get_obj_instance(void);
-static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object* trg,
+static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1230,21 +1216,21 @@ static void set_and_check_instance(SirtRef<mirror::ArtField>* f, mirror::Object*
EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
- EXPECT_EQ(val, f->get()->GetObj(trg));
+ EXPECT_EQ(val, f->Get()->GetObj(trg));
}
#endif
-static void GetSetObjInstance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
- set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
// Allocate a string object for simplicity.
mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
- set_and_check_instance(f, obj->get(), str, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), str, self, referrer, test);
- set_and_check_instance(f, obj->get(), nullptr, self, referrer, test);
+ set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
@@ -1260,7 +1246,7 @@ extern "C" void art_quick_set64_static(void);
extern "C" void art_quick_get64_static(void);
#endif
-static void GetSet64Static(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f, Thread* self,
+static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
@@ -1295,7 +1281,7 @@ extern "C" void art_quick_set64_instance(void);
extern "C" void art_quick_get64_instance(void);
#endif
-static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtField>* f,
+static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__x86_64__) || defined(__aarch64__)
@@ -1304,20 +1290,20 @@ static void GetSet64Instance(SirtRef<mirror::Object>* obj, SirtRef<mirror::ArtFi
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
self,
referrer);
- int64_t res = f->get()->GetLong(obj->get());
+ int64_t res = f->Get()->GetLong(obj->Get());
EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
res++;
- f->get()->SetLong<false>(obj->get(), res);
+ f->Get()->SetLong<false>(obj->Get(), res);
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
- reinterpret_cast<size_t>(obj->get()),
+ reinterpret_cast<size_t>(obj->Get()),
0U,
reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
self,
@@ -1341,41 +1327,41 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
CHECK(o != NULL);
ScopedObjectAccess soa(self);
- SirtRef<mirror::Object> obj(self, soa.Decode<mirror::Object*>(o));
-
- SirtRef<mirror::Class> c(self, obj->GetClass());
-
+ StackHandleScope<5> hs(self);
+ Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
+ Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
// Need a method as a referrer
- SirtRef<mirror::ArtMethod> m(self, c->GetDirectMethod(0));
+ Handle<mirror::ArtMethod> m(hs.NewHandle(c->GetDirectMethod(0)));
// Play with it...
// Static fields.
{
- SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetSFields());
+ Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- SirtRef<mirror::ArtField> f(self, fields->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
- FieldHelper fh(f.get());
+ FieldHelper fh(f.Get());
Primitive::Type type = fh.GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.get(), test);
+ GetSet32Static(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.get(), test);
+ GetSet64Static(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.get(), test);
+ GetSetObjStatic(&obj, &f, self, m.Get(), test);
}
break;
@@ -1387,30 +1373,31 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
// Instance fields.
{
- SirtRef<mirror::ObjectArray<mirror::ArtField>> fields(self, c.get()->GetIFields());
+ Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
int32_t num_fields = fields->GetLength();
for (int32_t i = 0; i < num_fields; ++i) {
- SirtRef<mirror::ArtField> f(self, fields->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
- FieldHelper fh(f.get());
+ FieldHelper fh(f.Get());
Primitive::Type type = fh.GetTypeAsPrimitiveType();
switch (type) {
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Instance(&obj, &f, self, m.get(), test);
+ GetSet32Instance(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Instance(&obj, &f, self, m.get(), test);
+ GetSet64Instance(&obj, &f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && fh.GetTypeDescriptor()[0] != '[') {
- GetSetObjInstance(&obj, &f, self, m.get(), test);
+ GetSetObjInstance(&obj, &f, self, m.Get(), test);
}
break;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index b886fb0..c0cbaea 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1053,12 +1053,12 @@ END_FUNCTION art_quick_resolution_trampoline
* | Return |
* | Callee-Save Data |
* #-------------------#
- * | SIRT |
+ * | handle scope |
* #-------------------#
* | Method* | <--- (1)
* #-------------------#
* | local ref cookie | // 4B
- * | SIRT size | // 4B TODO: roll into call stack alignment?
+ * | handle scope size | // 4B TODO: roll into call stack alignment?
* #-------------------#
* | JNI Call Stack |
* #-------------------# <--- SP on native call
@@ -1111,8 +1111,8 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
//
// 4 local state ref
// 4 padding
- // 4196 4k scratch space, enough for 2x 256 8-byte parameters (TODO: SIRT overhead?)
- // 16 SIRT member fields ?
+ // 4196 4k scratch space, enough for 2x 256 8-byte parameters (TODO: handle scope overhead?)
+ // 16 handle scope member fields ?
// + 112 14x 8-byte stack-2-register space
// ------
// 4332
@@ -1217,7 +1217,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
movq %rbx, %rsp
CFI_DEF_CFA_REGISTER(rsp)
.Lexception_in_native:
- // TODO: the SIRT contains the this pointer which is used by the debugger for exception
+ // TODO: the handle scope contains the this pointer which is used by the debugger for exception
// delivery.
movq %xmm0, 16(%rsp) // doesn't make sense!!!
movq 24(%rsp), %xmm1 // neither does this!!!
diff --git a/runtime/catch_block_stack_visitor.cc b/runtime/catch_block_stack_visitor.cc
index 8d10a97..b820276 100644
--- a/runtime/catch_block_stack_visitor.cc
+++ b/runtime/catch_block_stack_visitor.cc
@@ -19,7 +19,7 @@
#include "dex_instruction.h"
#include "mirror/art_method-inl.h"
#include "quick_exception_handler.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -50,7 +50,9 @@ bool CatchBlockStackVisitor::HandleTryItems(mirror::ArtMethod* method) {
}
if (dex_pc != DexFile::kDexNoIndex) {
bool clear_exception = false;
- uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception);
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> to_find(hs.NewHandle((*exception_)->GetClass()));
+ uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
exception_handler_->SetClearException(clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
exception_handler_->SetHandlerDexPc(found_dex_pc);
diff --git a/runtime/catch_block_stack_visitor.h b/runtime/catch_block_stack_visitor.h
index 6f0fe11..f45cf03 100644
--- a/runtime/catch_block_stack_visitor.h
+++ b/runtime/catch_block_stack_visitor.h
@@ -19,7 +19,7 @@
#include "mirror/object-inl.h"
#include "stack.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -34,10 +34,10 @@ class ThrowLocation;
// Finds catch handler or prepares deoptimization.
class CatchBlockStackVisitor FINAL : public StackVisitor {
public:
- CatchBlockStackVisitor(Thread* self, Context* context, SirtRef<mirror::Throwable>& exception,
+ CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), to_find_(self, exception->GetClass()),
+ : StackVisitor(self, context), self_(self), exception_(exception),
exception_handler_(exception_handler) {
}
@@ -48,7 +48,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
Thread* const self_;
// The type of the exception catch block to find.
- SirtRef<mirror::Class> to_find_;
+ Handle<mirror::Throwable>* exception_;
QuickExceptionHandler* const exception_handler_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 3df050e..0d5a805 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -86,9 +86,9 @@ void JniAbortF(const char* jni_function_name, const char* fmt, ...) {
* ===========================================================================
*/
-static bool IsSirtLocalRef(JNIEnv* env, jobject localRef) {
- return GetIndirectRefKind(localRef) == kSirtOrInvalid &&
- reinterpret_cast<JNIEnvExt*>(env)->self->SirtContains(localRef);
+static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
+ return GetIndirectRefKind(localRef) == kHandleScopeOrInvalid &&
+ reinterpret_cast<JNIEnvExt*>(env)->self->HandleScopeContains(localRef);
}
// Flags passed into ScopedCheck.
@@ -1243,7 +1243,7 @@ class CheckJNI {
static void DeleteLocalRef(JNIEnv* env, jobject localRef) {
CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, localRef);
- if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsSirtLocalRef(env, localRef)) {
+ if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsHandleScopeLocalRef(env, localRef)) {
JniAbortF(__FUNCTION__, "DeleteLocalRef on %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(localRef)).c_str(), localRef);
} else {
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index df88794..ce634e0 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -24,7 +24,7 @@
#include "mirror/iftable.h"
#include "mirror/object_array.h"
#include "object_utils.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -34,7 +34,8 @@ inline bool ClassLinker::IsInBootClassPath(const char* descriptor) {
}
inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) {
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
return FindClass(self, descriptor, class_loader);
}
@@ -49,7 +50,8 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class* e
DCHECK(!element_class->IsPrimitiveVoid());
std::string descriptor("[");
descriptor += ClassHelper(element_class).GetDescriptor();
- SirtRef<mirror::ClassLoader> class_loader(self, element_class->GetClassLoader());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(element_class->GetClassLoader()));
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
// Benign races in storing array class and incrementing index.
size_t victim_index = find_array_class_cache_next_victim_;
@@ -63,7 +65,8 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
mirror::String* resolved_string = referrer->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(resolved_string == NULL)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), declaring_class->GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_string = ResolveString(dex_file, string_idx, dex_cache);
if (resolved_string != nullptr) {
@@ -76,11 +79,11 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
mirror::ArtMethod* referrer) {
mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
+ if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (resolved_type != nullptr) {
@@ -95,9 +98,9 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtFie
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
if (UNLIKELY(resolved_type == NULL)) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, dex_cache_ptr);
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (resolved_type != nullptr) {
@@ -114,9 +117,9 @@ inline mirror::ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx,
referrer->GetDexCacheResolvedMethods()->Get(method_idx);
if (UNLIKELY(resolved_method == NULL || resolved_method->IsRuntimeMethod())) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_method = ResolveMethod(dex_file, method_idx, dex_cache, class_loader, referrer, type);
if (resolved_method != nullptr) {
@@ -133,9 +136,9 @@ inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx,
mirror::ArtField* resolved_field =
declaring_class->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == NULL)) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, declaring_class->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, declaring_class->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
if (resolved_field != nullptr) {
@@ -174,9 +177,8 @@ inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount)
inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Thread* self,
size_t length) {
- return mirror::ObjectArray<mirror::ArtField>::Alloc(self,
- GetClassRoot(kJavaLangReflectArtFieldArrayClass),
- length);
+ return mirror::ObjectArray<mirror::ArtField>::Alloc(
+ self, GetClassRoot(kJavaLangReflectArtFieldArrayClass), length);
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b2d8b37..3d268e4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -37,6 +37,7 @@
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
+#include "handle_scope.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "leb128.h"
@@ -59,8 +60,7 @@
#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "UniquePtr.h"
#include "utils.h"
@@ -202,11 +202,12 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
gc::Heap* heap = Runtime::Current()->GetHeap();
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
- SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>(
- heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor())));
- CHECK(java_lang_Class.get() != NULL);
- mirror::Class::SetClassClass(java_lang_Class.get());
- java_lang_Class->SetClass(java_lang_Class.get());
+ StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
+ Handle<mirror::Class> java_lang_Class(hs.NewHandle(down_cast<mirror::Class*>(
+ heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass), VoidFunctor()))));
+ CHECK(java_lang_Class.Get() != NULL);
+ mirror::Class::SetClassClass(java_lang_Class.Get());
+ java_lang_Class->SetClass(java_lang_Class.Get());
if (kUseBakerOrBrooksReadBarrier) {
java_lang_Class->AssertReadBarrierPointer();
}
@@ -215,44 +216,47 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// AllocClass(mirror::Class*) can now be used
// Class[] is used for reflection support.
- SirtRef<mirror::Class> class_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- class_array_class->SetComponentType(java_lang_Class.get());
+ Handle<mirror::Class> class_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ class_array_class->SetComponentType(java_lang_Class.Get());
// java_lang_Object comes next so that object_array_class can be created.
- SirtRef<mirror::Class> java_lang_Object(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- CHECK(java_lang_Object.get() != NULL);
+ Handle<mirror::Class> java_lang_Object(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ CHECK(java_lang_Object.Get() != NULL);
// backfill Object as the super class of Class.
- java_lang_Class->SetSuperClass(java_lang_Object.get());
+ java_lang_Class->SetSuperClass(java_lang_Object.Get());
java_lang_Object->SetStatus(mirror::Class::kStatusLoaded, self);
// Object[] next to hold class roots.
- SirtRef<mirror::Class> object_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- object_array_class->SetComponentType(java_lang_Object.get());
+ Handle<mirror::Class> object_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_class->SetComponentType(java_lang_Object.Get());
// Setup the char class to be used for char[].
- SirtRef<mirror::Class> char_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
+ Handle<mirror::Class> char_class(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
// Setup the char[] class to be used for String.
- SirtRef<mirror::Class> char_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
- char_array_class->SetComponentType(char_class.get());
- mirror::CharArray::SetArrayClass(char_array_class.get());
+ Handle<mirror::Class> char_array_class(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ char_array_class->SetComponentType(char_class.Get());
+ mirror::CharArray::SetArrayClass(char_array_class.Get());
// Setup String.
- SirtRef<mirror::Class> java_lang_String(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::StringClass)));
- mirror::String::SetClass(java_lang_String.get());
+ Handle<mirror::Class> java_lang_String(hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::StringClass))));
+ mirror::String::SetClass(java_lang_String.Get());
java_lang_String->SetObjectSize(sizeof(mirror::String));
java_lang_String->SetStatus(mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.get(),
+ class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
kClassRootsMax);
CHECK(class_roots_ != NULL);
- SetClassRoot(kJavaLangClass, java_lang_Class.get());
- SetClassRoot(kJavaLangObject, java_lang_Object.get());
- SetClassRoot(kClassArrayClass, class_array_class.get());
- SetClassRoot(kObjectArrayClass, object_array_class.get());
- SetClassRoot(kCharArrayClass, char_array_class.get());
- SetClassRoot(kJavaLangString, java_lang_String.get());
+ SetClassRoot(kJavaLangClass, java_lang_Class.Get());
+ SetClassRoot(kJavaLangObject, java_lang_Object.Get());
+ SetClassRoot(kClassArrayClass, class_array_class.Get());
+ SetClassRoot(kObjectArrayClass, object_array_class.Get());
+ SetClassRoot(kCharArrayClass, char_array_class.Get());
+ SetClassRoot(kJavaLangString, java_lang_String.Get());
// Setup the primitive type classes.
SetClassRoot(kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
@@ -268,53 +272,54 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
array_iftable_ = AllocIfTable(self, 2);
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
- SirtRef<mirror::Class> int_array_class(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::Class)));
+ Handle<mirror::Class> int_array_class(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
int_array_class->SetComponentType(GetClassRoot(kPrimitiveInt));
- mirror::IntArray::SetArrayClass(int_array_class.get());
- SetClassRoot(kIntArrayClass, int_array_class.get());
+ mirror::IntArray::SetArrayClass(int_array_class.Get());
+ SetClassRoot(kIntArrayClass, int_array_class.Get());
// now that these are registered, we can use AllocClass() and AllocObjectArray
// Set up DexCache. This cannot be done later since AppendToBootClassPath calls AllocDexCache.
- SirtRef<mirror::Class>
- java_lang_DexCache(self, AllocClass(self, java_lang_Class.get(), sizeof(mirror::DexCacheClass)));
- SetClassRoot(kJavaLangDexCache, java_lang_DexCache.get());
+ Handle<mirror::Class> java_lang_DexCache(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::DexCacheClass))));
+ SetClassRoot(kJavaLangDexCache, java_lang_DexCache.Get());
java_lang_DexCache->SetObjectSize(sizeof(mirror::DexCache));
java_lang_DexCache->SetStatus(mirror::Class::kStatusResolved, self);
// Constructor, Field, Method, and AbstractMethod are necessary so that FindClass can link members.
- SirtRef<mirror::Class> java_lang_reflect_ArtField(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::ArtFieldClass)));
- CHECK(java_lang_reflect_ArtField.get() != NULL);
+ Handle<mirror::Class> java_lang_reflect_ArtField(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::ArtFieldClass))));
+ CHECK(java_lang_reflect_ArtField.Get() != NULL);
java_lang_reflect_ArtField->SetObjectSize(sizeof(mirror::ArtField));
- SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.get());
+ SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.Get());
java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusResolved, self);
- mirror::ArtField::SetClass(java_lang_reflect_ArtField.get());
+ mirror::ArtField::SetClass(java_lang_reflect_ArtField.Get());
- SirtRef<mirror::Class> java_lang_reflect_ArtMethod(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::ArtMethodClass)));
- CHECK(java_lang_reflect_ArtMethod.get() != NULL);
+ Handle<mirror::Class> java_lang_reflect_ArtMethod(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::ArtMethodClass))));
+ CHECK(java_lang_reflect_ArtMethod.Get() != NULL);
java_lang_reflect_ArtMethod->SetObjectSize(sizeof(mirror::ArtMethod));
- SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.get());
+ SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
- mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.get());
+ mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
// Set up array classes for string, field, method
- SirtRef<mirror::Class> object_array_string(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_string->SetComponentType(java_lang_String.get());
- SetClassRoot(kJavaLangStringArrayClass, object_array_string.get());
+ Handle<mirror::Class> object_array_string(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_string->SetComponentType(java_lang_String.Get());
+ SetClassRoot(kJavaLangStringArrayClass, object_array_string.Get());
- SirtRef<mirror::Class> object_array_art_method(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.get());
- SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.get());
+ Handle<mirror::Class> object_array_art_method(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_art_method->SetComponentType(java_lang_reflect_ArtMethod.Get());
+ SetClassRoot(kJavaLangReflectArtMethodArrayClass, object_array_art_method.Get());
- SirtRef<mirror::Class> object_array_art_field(self, AllocClass(self, java_lang_Class.get(),
- sizeof(mirror::Class)));
- object_array_art_field->SetComponentType(java_lang_reflect_ArtField.get());
- SetClassRoot(kJavaLangReflectArtFieldArrayClass, object_array_art_field.get());
+ Handle<mirror::Class> object_array_art_field(
+ hs.NewHandle(AllocClass(self, java_lang_Class.Get(), sizeof(mirror::Class))));
+ object_array_art_field->SetComponentType(java_lang_reflect_ArtField.Get());
+ SetClassRoot(kJavaLangReflectArtFieldArrayClass, object_array_art_field.Get());
// Setup boot_class_path_ and register class_path now that we can use AllocObjectArray to create
// DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
@@ -329,8 +334,8 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// now we can use FindSystemClass
// run char class through InitializePrimitiveClass to finish init
- InitializePrimitiveClass(char_class.get(), Primitive::kPrimChar);
- SetClassRoot(kPrimitiveChar, char_class.get()); // needs descriptor
+ InitializePrimitiveClass(char_class.Get(), Primitive::kPrimChar);
+ SetClassRoot(kPrimitiveChar, char_class.Get()); // needs descriptor
// Create runtime resolution and imt conflict methods. Also setup the default imt.
Runtime* runtime = Runtime::Current();
@@ -341,16 +346,16 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
java_lang_Object->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Object_class = FindSystemClass(self, "Ljava/lang/Object;");
- CHECK_EQ(java_lang_Object.get(), Object_class);
+ CHECK_EQ(java_lang_Object.Get(), Object_class);
CHECK_EQ(java_lang_Object->GetObjectSize(), sizeof(mirror::Object));
java_lang_String->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
- CHECK_EQ(java_lang_String.get(), String_class);
+ CHECK_EQ(java_lang_String.Get(), String_class);
CHECK_EQ(java_lang_String->GetObjectSize(), sizeof(mirror::String));
java_lang_DexCache->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* DexCache_class = FindSystemClass(self, "Ljava/lang/DexCache;");
- CHECK_EQ(java_lang_String.get(), String_class);
- CHECK_EQ(java_lang_DexCache.get(), DexCache_class);
+ CHECK_EQ(java_lang_String.Get(), String_class);
+ CHECK_EQ(java_lang_DexCache.Get(), DexCache_class);
CHECK_EQ(java_lang_DexCache->GetObjectSize(), sizeof(mirror::DexCache));
// Setup the primitive array type classes - can't be done until Object has a vtable.
@@ -361,13 +366,13 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
mirror::Class* found_char_array_class = FindSystemClass(self, "[C");
- CHECK_EQ(char_array_class.get(), found_char_array_class);
+ CHECK_EQ(char_array_class.Get(), found_char_array_class);
SetClassRoot(kShortArrayClass, FindSystemClass(self, "[S"));
mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
mirror::Class* found_int_array_class = FindSystemClass(self, "[I");
- CHECK_EQ(int_array_class.get(), found_int_array_class);
+ CHECK_EQ(int_array_class.Get(), found_int_array_class);
SetClassRoot(kLongArrayClass, FindSystemClass(self, "[J"));
mirror::LongArray::SetArrayClass(GetClassRoot(kLongArrayClass));
@@ -379,10 +384,10 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
mirror::Class* found_class_array_class = FindSystemClass(self, "[Ljava/lang/Class;");
- CHECK_EQ(class_array_class.get(), found_class_array_class);
+ CHECK_EQ(class_array_class.Get(), found_class_array_class);
mirror::Class* found_object_array_class = FindSystemClass(self, "[Ljava/lang/Object;");
- CHECK_EQ(object_array_class.get(), found_object_array_class);
+ CHECK_EQ(object_array_class.Get(), found_object_array_class);
// Setup the single, global copy of "iftable".
mirror::Class* java_lang_Cloneable = FindSystemClass(self, "Ljava/lang/Cloneable;");
@@ -395,35 +400,35 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
array_iftable_->SetInterface(1, java_io_Serializable);
// Sanity check Class[] and Object[]'s interfaces.
- ClassHelper kh(class_array_class.get());
+ ClassHelper kh(class_array_class.Get());
CHECK_EQ(java_lang_Cloneable, kh.GetDirectInterface(0));
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
- kh.ChangeClass(object_array_class.get());
+ kh.ChangeClass(object_array_class.Get());
CHECK_EQ(java_lang_Cloneable, kh.GetDirectInterface(0));
CHECK_EQ(java_io_Serializable, kh.GetDirectInterface(1));
// Run Class, ArtField, and ArtMethod through FindSystemClass. This initializes their
// dex_cache_ fields and register them in class_table_.
mirror::Class* Class_class = FindSystemClass(self, "Ljava/lang/Class;");
- CHECK_EQ(java_lang_Class.get(), Class_class);
+ CHECK_EQ(java_lang_Class.Get(), Class_class);
java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
- CHECK_EQ(java_lang_reflect_ArtMethod.get(), Art_method_class);
+ CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusNotReady, self);
mirror::Class* Art_field_class = FindSystemClass(self, "Ljava/lang/reflect/ArtField;");
- CHECK_EQ(java_lang_reflect_ArtField.get(), Art_field_class);
+ CHECK_EQ(java_lang_reflect_ArtField.Get(), Art_field_class);
mirror::Class* String_array_class = FindSystemClass(self, class_roots_descriptors_[kJavaLangStringArrayClass]);
- CHECK_EQ(object_array_string.get(), String_array_class);
+ CHECK_EQ(object_array_string.Get(), String_array_class);
mirror::Class* Art_method_array_class =
FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]);
- CHECK_EQ(object_array_art_method.get(), Art_method_array_class);
+ CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
mirror::Class* Art_field_array_class =
FindSystemClass(self, class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]);
- CHECK_EQ(object_array_art_field.get(), Art_field_array_class);
+ CHECK_EQ(object_array_art_field.Get(), Art_field_array_class);
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
@@ -529,8 +534,9 @@ void ClassLinker::RunRootClinits() {
for (size_t i = 0; i < ClassLinker::kClassRootsMax; ++i) {
mirror::Class* c = GetClassRoot(ClassRoot(i));
if (!c->IsArrayClass() && !c->IsPrimitive()) {
- SirtRef<mirror::Class> sirt_class(self, GetClassRoot(ClassRoot(i)));
- EnsureInitialized(sirt_class, true, true);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(GetClassRoot(ClassRoot(i))));
+ EnsureInitialized(h_class, true, true);
self->AssertNoPendingException();
}
}
@@ -1027,10 +1033,11 @@ void ClassLinker::InitFromImage() {
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
- SirtRef<mirror::ObjectArray<mirror::Class> > class_roots(
- self,
- space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>());
- class_roots_ = class_roots.get();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
+ space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
+ AsObjectArray<mirror::Class>()));
+ class_roots_ = class_roots.Get();
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
@@ -1039,7 +1046,8 @@ void ClassLinker::InitFromImage() {
CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- SirtRef<mirror::DexCache> dex_cache(self, dex_caches->Get(i));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
nullptr);
@@ -1069,7 +1077,7 @@ void ClassLinker::InitFromImage() {
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
- class_roots_ = class_roots.get();
+ class_roots_ = class_roots.Get();
// reinit array_iftable_ from any array class instance, they should be ==
array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
@@ -1212,42 +1220,43 @@ ClassLinker::~ClassLinker() {
mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
gc::Heap* heap = Runtime::Current()->GetHeap();
- SirtRef<mirror::Class> dex_cache_class(self, GetClassRoot(kJavaLangDexCache));
- SirtRef<mirror::DexCache> dex_cache(
- self, down_cast<mirror::DexCache*>(
- heap->AllocObject<true>(self, dex_cache_class.get(), dex_cache_class->GetObjectSize(),
- VoidFunctor())));
- if (dex_cache.get() == NULL) {
+ StackHandleScope<16> hs(self);
+ Handle<mirror::Class> dex_cache_class(hs.NewHandle(GetClassRoot(kJavaLangDexCache)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(down_cast<mirror::DexCache*>(
+ heap->AllocObject<true>(self, dex_cache_class.Get(), dex_cache_class->GetObjectSize(),
+ VoidFunctor()))));
+ if (dex_cache.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::String>
- location(self, intern_table_->InternStrong(dex_file.GetLocation().c_str()));
- if (location.get() == NULL) {
+ Handle<mirror::String>
+ location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str())));
+ if (location.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::String> >
- strings(self, AllocStringArray(self, dex_file.NumStringIds()));
- if (strings.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::String> >
+ strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds())));
+ if (strings.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::Class> >
- types(self, AllocClassArray(self, dex_file.NumTypeIds()));
- if (types.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::Class> >
+ types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds())));
+ if (types.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- methods(self, AllocArtMethodArray(self, dex_file.NumMethodIds()));
- if (methods.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ methods(hs.NewHandle(AllocArtMethodArray(self, dex_file.NumMethodIds())));
+ if (methods.Get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::ArtField> >
- fields(self, AllocArtFieldArray(self, dex_file.NumFieldIds()));
- if (fields.get() == NULL) {
+ Handle<mirror::ObjectArray<mirror::ArtField> >
+ fields(hs.NewHandle(AllocArtFieldArray(self, dex_file.NumFieldIds())));
+ if (fields.Get() == NULL) {
return NULL;
}
- dex_cache->Init(&dex_file, location.get(), strings.get(), types.get(), methods.get(),
- fields.get());
- return dex_cache.get();
+ dex_cache->Init(&dex_file, location.Get(), strings.Get(), types.Get(), methods.Get(),
+ fields.Get());
+ return dex_cache.Get();
}
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
@@ -1315,19 +1324,20 @@ static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass)
DCHECK(klass != NULL);
// Wait for the class if it has not already been linked.
if (!klass->IsResolved() && !klass->IsErroneous()) {
- SirtRef<mirror::Class> sirt_class(self, klass);
- ObjectLock<mirror::Class> lock(self, &sirt_class);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ ObjectLock<mirror::Class> lock(self, &h_class);
// Check for circular dependencies between classes.
- if (!sirt_class->IsResolved() && sirt_class->GetClinitThreadId() == self->GetTid()) {
- ThrowClassCircularityError(sirt_class.get());
- sirt_class->SetStatus(mirror::Class::kStatusError, self);
+ if (!h_class->IsResolved() && h_class->GetClinitThreadId() == self->GetTid()) {
+ ThrowClassCircularityError(h_class.Get());
+ h_class->SetStatus(mirror::Class::kStatusError, self);
return nullptr;
}
// Wait for the pending initialization to complete.
- while (!sirt_class->IsResolved() && !sirt_class->IsErroneous()) {
+ while (!h_class->IsResolved() && !h_class->IsErroneous()) {
lock.WaitIgnoringInterrupts();
}
- klass = sirt_class.get();
+ klass = h_class.Get();
}
if (klass->IsErroneous()) {
ThrowEarlierClassFailure(klass);
@@ -1340,7 +1350,7 @@ static mirror::Class* EnsureResolved(Thread* self, mirror::Class* klass)
}
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader) {
+ const Handle<mirror::ClassLoader>& class_loader) {
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
DCHECK(self != nullptr);
self->AssertNoPendingException();
@@ -1350,17 +1360,18 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
return FindPrimitiveClass(descriptor[0]);
}
// Find the class in the loaded classes table.
- mirror::Class* klass = LookupClass(descriptor, class_loader.get());
+ mirror::Class* klass = LookupClass(descriptor, class_loader.Get());
if (klass != NULL) {
return EnsureResolved(self, klass);
}
// Class is not yet loaded.
if (descriptor[0] == '[') {
return CreateArrayClass(self, descriptor, class_loader);
- } else if (class_loader.get() == nullptr) {
+ } else if (class_loader.Get() == nullptr) {
DexFile::ClassPathEntry pair = DexFile::FindInClassPath(descriptor, boot_class_path_);
if (pair.second != NULL) {
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
}
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
@@ -1376,7 +1387,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
{
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> jclass_loader(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
class_path = &Runtime::Current()->GetCompileTimeClassPath(jclass_loader.get());
}
@@ -1388,7 +1399,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
} else {
ScopedObjectAccessUnchecked soa(self);
ScopedLocalRef<jobject> class_loader_object(soa.Env(),
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
std::string class_name_string(DescriptorToDot(descriptor));
ScopedLocalRef<jobject> result(soa.Env(), NULL);
{
@@ -1422,38 +1433,39 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
}
mirror::Class* ClassLinker::DefineClass(const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def) {
Thread* self = Thread::Current();
- SirtRef<mirror::Class> klass(self, NULL);
+ StackHandleScope<2> hs(self);
+ auto klass = hs.NewHandle<mirror::Class>(nullptr);
// Load the class from the dex file.
if (UNLIKELY(!init_done_)) {
// finish up init of hand crafted class_roots_
if (strcmp(descriptor, "Ljava/lang/Object;") == 0) {
- klass.reset(GetClassRoot(kJavaLangObject));
+ klass.Assign(GetClassRoot(kJavaLangObject));
} else if (strcmp(descriptor, "Ljava/lang/Class;") == 0) {
- klass.reset(GetClassRoot(kJavaLangClass));
+ klass.Assign(GetClassRoot(kJavaLangClass));
} else if (strcmp(descriptor, "Ljava/lang/String;") == 0) {
- klass.reset(GetClassRoot(kJavaLangString));
+ klass.Assign(GetClassRoot(kJavaLangString));
} else if (strcmp(descriptor, "Ljava/lang/DexCache;") == 0) {
- klass.reset(GetClassRoot(kJavaLangDexCache));
+ klass.Assign(GetClassRoot(kJavaLangDexCache));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtField;") == 0) {
- klass.reset(GetClassRoot(kJavaLangReflectArtField));
+ klass.Assign(GetClassRoot(kJavaLangReflectArtField));
} else if (strcmp(descriptor, "Ljava/lang/reflect/ArtMethod;") == 0) {
- klass.reset(GetClassRoot(kJavaLangReflectArtMethod));
+ klass.Assign(GetClassRoot(kJavaLangReflectArtMethod));
} else {
- klass.reset(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
+ klass.Assign(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
}
} else {
- klass.reset(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
+ klass.Assign(AllocClass(self, SizeOfClass(dex_file, dex_class_def)));
}
- if (UNLIKELY(klass.get() == NULL)) {
+ if (UNLIKELY(klass.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
return NULL;
}
klass->SetDexCache(FindDexCache(dex_file));
- LoadClass(dex_file, dex_class_def, klass, class_loader.get());
+ LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
// Check for a pending exception during load
if (self->IsExceptionPending()) {
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -1462,7 +1474,7 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
ObjectLock<mirror::Class> lock(self, &klass);
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
- mirror::Class* existing = InsertClass(descriptor, klass.get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, klass.Get(), Hash(descriptor));
if (existing != NULL) {
// We failed to insert because we raced with another thread. Calling EnsureResolved may cause
// this thread to block.
@@ -1479,7 +1491,7 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
// Link the class (if necessary)
CHECK(!klass->IsResolved());
// TODO: Use fast jobjects?
- SirtRef<mirror::ObjectArray<mirror::Class> > interfaces(self, nullptr);
+ auto interfaces = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
if (!LinkClass(self, klass, interfaces)) {
// Linking failed.
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -1498,9 +1510,9 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
* The class has been prepared and resolved but possibly not yet verified
* at this point.
*/
- Dbg::PostClassPrepare(klass.get());
+ Dbg::PostClassPrepare(klass.Get());
- return klass.get();
+ return klass.Get();
}
// Precomputes size that will be needed for Class, matching LinkStaticFields
@@ -1765,7 +1777,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
// Ignore virtual methods on the iterator.
}
-static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
+static void LinkCode(const Handle<mirror::ArtMethod>& method, const OatFile::OatClass* oat_class,
const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Method shouldn't have already been linked.
@@ -1774,11 +1786,11 @@ static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::Oa
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
- oat_method.LinkMethod(method.get());
+ oat_method.LinkMethod(method.Get());
// Install entry point from interpreter.
Runtime* runtime = Runtime::Current();
- bool enter_interpreter = NeedsInterpreter(method.get(),
+ bool enter_interpreter = NeedsInterpreter(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode());
if (enter_interpreter && !method->IsNative()) {
@@ -1832,7 +1844,7 @@ static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::Oa
}
// Allow instrumentation its chance to hijack code.
- runtime->GetInstrumentation()->UpdateMethodsCode(method.get(),
+ runtime->GetInstrumentation()->UpdateMethodsCode(method.Get(),
method->GetEntryPointFromQuickCompiledCode(),
method->GetEntryPointFromPortableCompiledCode(),
have_portable_code);
@@ -1840,9 +1852,9 @@ static void LinkCode(const SirtRef<mirror::ArtMethod>& method, const OatFile::Oa
void ClassLinker::LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader) {
- CHECK(klass.get() != NULL);
+ CHECK(klass.Get() != NULL);
CHECK(klass->GetDexCache() != NULL);
CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
const char* descriptor = dex_file.GetClassDescriptor(dex_class_def);
@@ -1878,7 +1890,7 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
void ClassLinker::LoadClassMembers(const DexFile& dex_file,
const byte* class_data,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
@@ -1902,21 +1914,23 @@ void ClassLinker::LoadClassMembers(const DexFile& dex_file,
klass->SetIFields(fields);
}
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
- SirtRef<mirror::ArtField> sfield(self, AllocArtField(self));
- if (UNLIKELY(sfield.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(sfield.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetStaticField(i, sfield.get());
+ klass->SetStaticField(i, sfield.Get());
LoadField(dex_file, it, klass, sfield);
}
for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
- SirtRef<mirror::ArtField> ifield(self, AllocArtField(self));
- if (UNLIKELY(ifield.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtField> ifield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(ifield.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetInstanceField(i, ifield.get());
+ klass->SetInstanceField(i, ifield.Get());
LoadField(dex_file, it, klass, ifield);
}
@@ -1943,12 +1957,13 @@ void ClassLinker::LoadClassMembers(const DexFile& dex_file,
}
size_t class_def_method_index = 0;
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
- SirtRef<mirror::ArtMethod> method(self, LoadMethod(self, dex_file, it, klass));
- if (UNLIKELY(method.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
+ if (UNLIKELY(method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetDirectMethod(i, method.get());
+ klass->SetDirectMethod(i, method.Get());
if (oat_class != nullptr) {
LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
}
@@ -1956,12 +1971,13 @@ void ClassLinker::LoadClassMembers(const DexFile& dex_file,
class_def_method_index++;
}
for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
- SirtRef<mirror::ArtMethod> method(self, LoadMethod(self, dex_file, it, klass));
- if (UNLIKELY(method.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
+ if (UNLIKELY(method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return;
}
- klass->SetVirtualMethod(i, method.get());
+ klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
if (oat_class != nullptr) {
LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
@@ -1972,17 +1988,17 @@ void ClassLinker::LoadClassMembers(const DexFile& dex_file,
}
void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtField>& dst) {
+ const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtField>& dst) {
uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
- dst->SetDeclaringClass(klass.get());
+ dst->SetDeclaringClass(klass.Get());
dst->SetAccessFlags(it.GetMemberAccessFlags());
}
mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass) {
+ const Handle<mirror::Class>& klass) {
uint32_t dex_method_idx = it.GetMemberIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
@@ -1996,7 +2012,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
const char* old_cause = self->StartAssertNoThreadSuspension("LoadMethod");
dst->SetDexMethodIndex(dex_method_idx);
- dst->SetDeclaringClass(klass.get());
+ dst->SetDeclaringClass(klass.Get());
dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
dst->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
@@ -2012,7 +2028,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
if (klass->GetClassLoader() != NULL) { // All non-boot finalizer methods are flagged
klass->SetFinalizable();
} else {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
const char* klass_descriptor = kh.GetDescriptor();
// The Enum class declares a "final" finalize() method to prevent subclasses from
// introducing a finalizer. We don't want to set the finalizable flag for Enum or its
@@ -2034,7 +2050,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
} else {
if (UNLIKELY((access_flags & kAccConstructor) == 0)) {
LOG(WARNING) << method_name << " didn't have expected constructor access flag in class "
- << PrettyDescriptor(klass.get()) << " in dex file " << dex_file.GetLocation();
+ << PrettyDescriptor(klass.Get()) << " in dex file " << dex_file.GetLocation();
access_flags |= kAccConstructor;
}
}
@@ -2047,14 +2063,15 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
- CHECK(dex_cache.get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
}
void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
- CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
+ const Handle<mirror::DexCache>& dex_cache) {
+ CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
boot_class_path_.push_back(&dex_file);
RegisterDexFile(dex_file, dex_cache);
}
@@ -2075,12 +2092,12 @@ bool ClassLinker::IsDexFileRegistered(const DexFile& dex_file) const {
}
void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
+ const Handle<mirror::DexCache>& dex_cache) {
dex_lock_.AssertExclusiveHeld(Thread::Current());
- CHECK(dex_cache.get() != NULL) << dex_file.GetLocation();
+ CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
<< dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
- dex_caches_.push_back(dex_cache.get());
+ dex_caches_.push_back(dex_cache.Get());
dex_cache->SetDexFile(&dex_file);
if (log_new_dex_caches_roots_) {
// TODO: This is not safe if we can remove dex caches.
@@ -2099,8 +2116,9 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// Don't alloc while holding the lock, since allocation may need to
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
- SirtRef<mirror::DexCache> dex_cache(self, AllocDexCache(self, dex_file));
- CHECK(dex_cache.get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
{
WriterMutexLock mu(self, dex_lock_);
if (IsDexFileRegisteredLocked(dex_file)) {
@@ -2111,7 +2129,7 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
}
void ClassLinker::RegisterDexFile(const DexFile& dex_file,
- const SirtRef<mirror::DexCache>& dex_cache) {
+ const Handle<mirror::DexCache>& dex_cache) {
WriterMutexLock mu(Thread::Current(), dex_lock_);
RegisterDexFileLocked(dex_file, dex_cache);
}
@@ -2162,8 +2180,9 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
CHECK(primitive_class != NULL);
// Must hold lock on object when initializing.
Thread* self = Thread::Current();
- SirtRef<mirror::Class> sirt_class(self, primitive_class);
- ObjectLock<mirror::Class> lock(self, &sirt_class);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(primitive_class));
+ ObjectLock<mirror::Class> lock(self, &h_class);
primitive_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
primitive_class->SetPrimitiveType(type);
primitive_class->SetStatus(mirror::Class::kStatusInitialized, self);
@@ -2187,11 +2206,12 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
//
// Returns NULL with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader) {
+ const Handle<mirror::ClassLoader>& class_loader) {
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
- SirtRef<mirror::Class> component_type(self, FindClass(self, descriptor + 1, class_loader));
- if (component_type.get() == nullptr) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1, class_loader)));
+ if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr;
}
@@ -2213,7 +2233,7 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
// because we effectively do this lookup again when we add the new
// class to the hash table --- necessary because of possible races with
// other threads.)
- if (class_loader.get() != component_type->GetClassLoader()) {
+ if (class_loader.Get() != component_type->GetClassLoader()) {
mirror::Class* new_class = LookupClass(descriptor, component_type->GetClassLoader());
if (new_class != NULL) {
return new_class;
@@ -2228,33 +2248,33 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
//
// Array classes are simple enough that we don't need to do a full
// link step.
- SirtRef<mirror::Class> new_class(self, NULL);
+ auto new_class = hs.NewHandle<mirror::Class>(nullptr);
if (UNLIKELY(!init_done_)) {
// Classes that were hand created, ie not by FindSystemClass
if (strcmp(descriptor, "[Ljava/lang/Class;") == 0) {
- new_class.reset(GetClassRoot(kClassArrayClass));
+ new_class.Assign(GetClassRoot(kClassArrayClass));
} else if (strcmp(descriptor, "[Ljava/lang/Object;") == 0) {
- new_class.reset(GetClassRoot(kObjectArrayClass));
+ new_class.Assign(GetClassRoot(kObjectArrayClass));
} else if (strcmp(descriptor, class_roots_descriptors_[kJavaLangStringArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangStringArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangStringArrayClass));
} else if (strcmp(descriptor,
class_roots_descriptors_[kJavaLangReflectArtMethodArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangReflectArtMethodArrayClass));
} else if (strcmp(descriptor,
class_roots_descriptors_[kJavaLangReflectArtFieldArrayClass]) == 0) {
- new_class.reset(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
+ new_class.Assign(GetClassRoot(kJavaLangReflectArtFieldArrayClass));
} else if (strcmp(descriptor, "[C") == 0) {
- new_class.reset(GetClassRoot(kCharArrayClass));
+ new_class.Assign(GetClassRoot(kCharArrayClass));
} else if (strcmp(descriptor, "[I") == 0) {
- new_class.reset(GetClassRoot(kIntArrayClass));
+ new_class.Assign(GetClassRoot(kIntArrayClass));
}
}
- if (new_class.get() == nullptr) {
- new_class.reset(AllocClass(self, sizeof(mirror::Class)));
- if (new_class.get() == nullptr) {
+ if (new_class.Get() == nullptr) {
+ new_class.Assign(AllocClass(self, sizeof(mirror::Class)));
+ if (new_class.Get() == nullptr) {
return nullptr;
}
- new_class->SetComponentType(component_type.get());
+ new_class->SetComponentType(component_type.Get());
}
ObjectLock<mirror::Class> lock(self, &new_class); // Must hold lock on object when initializing.
DCHECK(new_class->GetComponentType() != NULL);
@@ -2294,9 +2314,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
new_class->SetAccessFlags(access_flags);
- mirror::Class* existing = InsertClass(descriptor, new_class.get(), Hash(descriptor));
+ mirror::Class* existing = InsertClass(descriptor, new_class.Get(), Hash(descriptor));
if (existing == nullptr) {
- return new_class.get();
+ return new_class.Get();
}
// Another thread must have loaded the class after we
// started but before we finished. Abandon what we've
@@ -2528,7 +2548,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
}
}
-void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::VerifyClass(const Handle<mirror::Class>& klass) {
// TODO: assert that the monitor on the Class is held
Thread* self = Thread::Current();
ObjectLock<mirror::Class> lock(self, &klass);
@@ -2542,7 +2562,7 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
// The class might already be erroneous, for example at compile time if we attempted to verify
// this class as a parent to another.
if (klass->IsErroneous()) {
- ThrowEarlierClassFailure(klass.get());
+ ThrowEarlierClassFailure(klass.Get());
return;
}
@@ -2550,7 +2570,7 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
klass->SetStatus(mirror::Class::kStatusVerifying, self);
} else {
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
- << PrettyClass(klass.get());
+ << PrettyClass(klass.Get());
CHECK(!Runtime::Current()->IsCompiler());
klass->SetStatus(mirror::Class::kStatusVerifyingAtRuntime, self);
}
@@ -2562,8 +2582,9 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
}
// Verify super class.
- SirtRef<mirror::Class> super(self, klass->GetSuperClass());
- if (super.get() != NULL) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass()));
+ if (super.Get() != NULL) {
// Acquire lock to prevent races on verifying the super class.
ObjectLock<mirror::Class> lock(self, &super);
@@ -2572,16 +2593,16 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
}
if (!super->IsCompileTimeVerified()) {
std::string error_msg(StringPrintf("Rejecting class %s that attempts to sub-class erroneous class %s",
- PrettyDescriptor(klass.get()).c_str(),
- PrettyDescriptor(super.get()).c_str()));
+ PrettyDescriptor(klass.Get()).c_str(),
+ PrettyDescriptor(super.Get()).c_str()));
LOG(ERROR) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- SirtRef<mirror::Throwable> cause(self, self->GetException(NULL));
- if (cause.get() != nullptr) {
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ if (cause.Get() != nullptr) {
self->ClearException();
}
- ThrowVerifyError(klass.get(), "%s", error_msg.c_str());
- if (cause.get() != nullptr) {
- self->GetException(nullptr)->SetCause(cause.get());
+ ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
+ if (cause.Get() != nullptr) {
+ self->GetException(nullptr)->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
if (Runtime::Current()->IsCompiler()) {
@@ -2595,26 +2616,26 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
// Try to use verification information from the oat file, otherwise do runtime verification.
const DexFile& dex_file = *klass->GetDexCache()->GetDexFile();
mirror::Class::Status oat_file_class_status(mirror::Class::kStatusNotReady);
- bool preverified = VerifyClassUsingOatFile(dex_file, klass.get(), oat_file_class_status);
+ bool preverified = VerifyClassUsingOatFile(dex_file, klass.Get(), oat_file_class_status);
if (oat_file_class_status == mirror::Class::kStatusError) {
VLOG(class_linker) << "Skipping runtime verification of erroneous class "
- << PrettyDescriptor(klass.get()) << " in "
+ << PrettyDescriptor(klass.Get()) << " in "
<< klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- ThrowVerifyError(klass.get(), "Rejecting class %s because it failed compile-time verification",
- PrettyDescriptor(klass.get()).c_str());
+ ThrowVerifyError(klass.Get(), "Rejecting class %s because it failed compile-time verification",
+ PrettyDescriptor(klass.Get()).c_str());
klass->SetStatus(mirror::Class::kStatusError, self);
return;
}
verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
std::string error_msg;
if (!preverified) {
- verifier_failure = verifier::MethodVerifier::VerifyClass(klass.get(),
+ verifier_failure = verifier::MethodVerifier::VerifyClass(klass.Get(),
Runtime::Current()->IsCompiler(),
&error_msg);
}
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
if (!preverified && verifier_failure != verifier::MethodVerifier::kNoFailure) {
- VLOG(class_linker) << "Soft verification failure in class " << PrettyDescriptor(klass.get())
+ VLOG(class_linker) << "Soft verification failure in class " << PrettyDescriptor(klass.Get())
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< " because: " << error_msg;
}
@@ -2624,7 +2645,7 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
if (verifier_failure == verifier::MethodVerifier::kNoFailure) {
// Even though there were no verifier failures we need to respect whether the super-class
// was verified or requiring runtime reverification.
- if (super.get() == NULL || super->IsVerified()) {
+ if (super.Get() == NULL || super->IsVerified()) {
klass->SetStatus(mirror::Class::kStatusVerified, self);
} else {
CHECK_EQ(super->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
@@ -2644,11 +2665,11 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
}
}
} else {
- LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.get())
+ LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.Get())
<< " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< " because: " << error_msg;
self->AssertNoPendingException();
- ThrowVerifyError(klass.get(), "%s", error_msg.c_str());
+ ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
klass->SetStatus(mirror::Class::kStatusError, self);
}
if (preverified || verifier_failure == verifier::MethodVerifier::kNoFailure) {
@@ -2739,7 +2760,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
}
void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
- const SirtRef<mirror::Class>& klass) {
+ const Handle<mirror::Class>& klass) {
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
}
@@ -2780,15 +2801,16 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
static void CheckProxyConstructor(mirror::ArtMethod* constructor);
static void CheckProxyMethod(mirror::ArtMethod* method,
- SirtRef<mirror::ArtMethod>& prototype);
+ Handle<mirror::ArtMethod>& prototype);
mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring name,
jobjectArray interfaces, jobject loader,
jobjectArray methods, jobjectArray throws) {
Thread* self = soa.Self();
- SirtRef<mirror::Class> klass(self, AllocClass(self, GetClassRoot(kJavaLangClass),
- sizeof(mirror::SynthesizedProxyClass)));
- if (klass.get() == NULL) {
+ StackHandleScope<8> hs(self);
+ Handle<mirror::Class> klass(hs.NewHandle(AllocClass(self, GetClassRoot(kJavaLangClass),
+ sizeof(mirror::SynthesizedProxyClass))));
+ if (klass.Get() == NULL) {
CHECK(self->IsExceptionPending()); // OOME.
return NULL;
}
@@ -2813,38 +2835,38 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring na
}
// 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
// our proxy, so Class.getInterfaces doesn't return the flattened set.
- SirtRef<mirror::ArtField> interfaces_sfield(self, AllocArtField(self));
- if (UNLIKELY(interfaces_sfield.get() == NULL)) {
+ Handle<mirror::ArtField> interfaces_sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(interfaces_sfield.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
- klass->SetStaticField(0, interfaces_sfield.get());
+ klass->SetStaticField(0, interfaces_sfield.Get());
interfaces_sfield->SetDexFieldIndex(0);
- interfaces_sfield->SetDeclaringClass(klass.get());
+ interfaces_sfield->SetDeclaringClass(klass.Get());
interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// 2. Create a static field 'throws' that holds exceptions thrown by our methods.
- SirtRef<mirror::ArtField> throws_sfield(self, AllocArtField(self));
- if (UNLIKELY(throws_sfield.get() == NULL)) {
+ Handle<mirror::ArtField> throws_sfield(hs.NewHandle(AllocArtField(self)));
+ if (UNLIKELY(throws_sfield.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
- klass->SetStaticField(1, throws_sfield.get());
+ klass->SetStaticField(1, throws_sfield.Get());
throws_sfield->SetDexFieldIndex(1);
- throws_sfield->SetDeclaringClass(klass.get());
+ throws_sfield->SetDeclaringClass(klass.Get());
throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
{
mirror::ObjectArray<mirror::ArtMethod>* directs = AllocArtMethodArray(self, 1);
- if (UNLIKELY(directs == NULL)) {
+ if (UNLIKELY(directs == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetDirectMethods(directs);
mirror::ArtMethod* constructor = CreateProxyConstructor(self, klass, proxy_class);
- if (UNLIKELY(constructor == NULL)) {
+ if (UNLIKELY(constructor == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetDirectMethod(0, constructor);
}
@@ -2861,13 +2883,14 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring na
klass->SetVirtualMethods(virtuals);
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
+ StackHandleScope<1> hs(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
+ Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
- if (UNLIKELY(clone == NULL)) {
+ if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
klass->SetVirtualMethod(i, clone);
}
@@ -2879,26 +2902,27 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring na
{
ObjectLock<mirror::Class> lock(self, &klass); // Must hold lock on object when resolved.
// Link the fields and virtual methods, creating vtable and iftables
- SirtRef<mirror::ObjectArray<mirror::Class> > sirt_interfaces(
- self, soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
- if (!LinkClass(self, klass, sirt_interfaces)) {
+ Handle<mirror::ObjectArray<mirror::Class> > h_interfaces(
+ hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
+ if (!LinkClass(self, klass, h_interfaces)) {
klass->SetStatus(mirror::Class::kStatusError, self);
return nullptr;
}
- interfaces_sfield->SetObject<false>(klass.get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
- throws_sfield->SetObject<false>(klass.get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
+ interfaces_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+ throws_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
klass->SetStatus(mirror::Class::kStatusInitialized, self);
}
// sanity checks
if (kIsDebugBuild) {
- CHECK(klass->GetIFields() == NULL);
+ CHECK(klass->GetIFields() == nullptr);
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
+ StackHandleScope<1> hs(self);
mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- SirtRef<mirror::ArtMethod> prototype(self, decoded_methods->Get(i));
+ Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i)));
CheckProxyMethod(klass->GetVirtualMethod(i), prototype);
}
@@ -2912,14 +2936,14 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccess& soa, jstring na
CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
mirror::SynthesizedProxyClass* synth_proxy_class =
- down_cast<mirror::SynthesizedProxyClass*>(klass.get());
+ down_cast<mirror::SynthesizedProxyClass*>(klass.Get());
CHECK_EQ(synth_proxy_class->GetInterfaces(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
CHECK_EQ(synth_proxy_class->GetThrows(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
}
- std::string descriptor(GetDescriptorForProxy(klass.get()));
- mirror::Class* existing = InsertClass(descriptor.c_str(), klass.get(), Hash(descriptor.c_str()));
+ std::string descriptor(GetDescriptorForProxy(klass.Get()));
+ mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), Hash(descriptor.c_str()));
CHECK(existing == nullptr);
- return klass.get();
+ return klass.Get();
}
std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
@@ -2954,7 +2978,7 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::Class* proxy_class) {
// Create constructor for Proxy that must initialize h
mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
@@ -2971,7 +2995,7 @@ mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
}
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
- constructor->SetDeclaringClass(klass.get());
+ constructor->SetDeclaringClass(klass.Get());
return constructor;
}
@@ -2985,12 +3009,12 @@ static void CheckProxyConstructor(mirror::ArtMethod* constructor)
}
mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
- const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtMethod>& prototype) {
+ const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtMethod>& prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
- prototype.get());
+ prototype.Get());
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
// as necessary
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
@@ -3001,7 +3025,7 @@ mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
// Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
// the intersection of throw exceptions as defined in Proxy
- method->SetDeclaringClass(klass.get());
+ method->SetDeclaringClass(klass.Get());
method->SetAccessFlags((method->GetAccessFlags() & ~kAccAbstract) | kAccFinal);
// At runtime the method looks like a reference and argument saving method, clone the code
@@ -3014,7 +3038,7 @@ mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
}
static void CheckProxyMethod(mirror::ArtMethod* method,
- SirtRef<mirror::ArtMethod>& prototype)
+ Handle<mirror::ArtMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Basic sanity
CHECK(!prototype->IsFinal());
@@ -3029,7 +3053,7 @@ static void CheckProxyMethod(mirror::ArtMethod* method,
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
MethodHelper mh(method);
- MethodHelper mh2(prototype.get());
+ MethodHelper mh2(prototype.Get());
CHECK_STREQ(mh.GetName(), mh2.GetName());
CHECK_STREQ(mh.GetShorty(), mh2.GetShorty());
// More complex sanity - via dex cache
@@ -3075,7 +3099,7 @@ bool ClassLinker::IsInitialized() const {
return init_done_;
}
-bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_init_statics,
+bool ClassLinker::InitializeClass(const Handle<mirror::Class>& klass, bool can_init_statics,
bool can_init_parents) {
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
@@ -3087,7 +3111,7 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
}
// Fast fail if initialization requires a full runtime. Not part of the JLS.
- if (!CanWeInitializeClass(klass.get(), can_init_statics, can_init_parents)) {
+ if (!CanWeInitializeClass(klass.Get(), can_init_statics, can_init_parents)) {
return false;
}
@@ -3103,11 +3127,11 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
// Was the class already found to be erroneous? Done under the lock to match the JLS.
if (klass->IsErroneous()) {
- ThrowEarlierClassFailure(klass.get());
+ ThrowEarlierClassFailure(klass.Get());
return false;
}
- CHECK(klass->IsResolved()) << PrettyClass(klass.get()) << ": state=" << klass->GetStatus();
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get()) << ": state=" << klass->GetStatus();
if (!klass->IsVerified()) {
VerifyClass(klass);
@@ -3144,7 +3168,7 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
return false;
}
- CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.get());
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.Get());
// From here out other threads may observe that we're initializing and so changes of state
// require the a notification.
@@ -3160,14 +3184,15 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
if (!super_class->IsInitialized()) {
CHECK(!super_class->IsInterface());
CHECK(can_init_parents);
- SirtRef<mirror::Class> sirt_super(self, super_class);
- bool super_initialized = InitializeClass(sirt_super, can_init_statics, true);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+ bool super_initialized = InitializeClass(handle_scope_super, can_init_statics, true);
if (!super_initialized) {
// The super class was verified ahead of entering initializing, we should only be here if
// the super class became erroneous due to initialization.
- CHECK(sirt_super->IsErroneous() && self->IsExceptionPending())
- << "Super class initialization failed for " << PrettyDescriptor(sirt_super.get())
- << " that has unexpected status " << sirt_super->GetStatus()
+ CHECK(handle_scope_super->IsErroneous() && self->IsExceptionPending())
+ << "Super class initialization failed for " << PrettyDescriptor(handle_scope_super.Get())
+ << " that has unexpected status " << handle_scope_super->GetStatus()
<< "\nPending exception:\n"
<< (self->GetException(NULL) != NULL ? self->GetException(NULL)->Dump() : "");
ObjectLock<mirror::Class> lock(self, &klass);
@@ -3179,19 +3204,20 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
}
if (klass->NumStaticFields() > 0) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
const DexFile::ClassDef* dex_class_def = kh.GetClassDef();
CHECK(dex_class_def != NULL);
const DexFile& dex_file = kh.GetDexFile();
- SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
- SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(kh.GetDexCache()));
EncodedStaticFieldValueIterator it(dex_file, &dex_cache, &class_loader,
this, *dex_class_def);
if (it.HasNext()) {
CHECK(can_init_statics);
// We reordered the fields, so we need to be able to map the field indexes to the right fields.
SafeMap<uint32_t, mirror::ArtField*> field_map;
- ConstructFieldMap(dex_file, *dex_class_def, klass.get(), field_map);
+ ConstructFieldMap(dex_file, *dex_class_def, klass.Get(), field_map);
for (size_t i = 0; it.HasNext(); i++, it.Next()) {
if (Runtime::Current()->IsActiveTransaction()) {
it.ReadValueToField<true>(field_map.Get(i));
@@ -3229,17 +3255,17 @@ bool ClassLinker::InitializeClass(const SirtRef<mirror::Class>& klass, bool can_
// Set the class as initialized except if failed to initialize static fields.
klass->SetStatus(mirror::Class::kStatusInitialized, self);
if (VLOG_IS_ON(class_linker)) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
LOG(INFO) << "Initialized class " << kh.GetDescriptor() << " from " << kh.GetLocation();
}
// Opportunistically set static method trampolines to their destination.
- FixupStaticTrampolines(klass.get());
+ FixupStaticTrampolines(klass.Get());
}
}
return success;
}
-bool ClassLinker::WaitForInitializeClass(const SirtRef<mirror::Class>& klass, Thread* self,
+bool ClassLinker::WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
ObjectLock<mirror::Class>& lock)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
while (true) {
@@ -3267,19 +3293,19 @@ bool ClassLinker::WaitForInitializeClass(const SirtRef<mirror::Class>& klass, Th
// The caller wants an exception, but it was thrown in a
// different thread. Synthesize one here.
ThrowNoClassDefFoundError("<clinit> failed for class %s; see exception in other thread",
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
if (klass->IsInitialized()) {
return true;
}
- LOG(FATAL) << "Unexpected class status. " << PrettyClass(klass.get()) << " is "
+ LOG(FATAL) << "Unexpected class status. " << PrettyClass(klass.Get()) << " is "
<< klass->GetStatus();
}
- LOG(FATAL) << "Not Reached" << PrettyClass(klass.get());
+ LOG(FATAL) << "Not Reached" << PrettyClass(klass.Get());
}
-bool ClassLinker::ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass) {
if (klass->IsInterface()) {
return true;
}
@@ -3293,8 +3319,8 @@ bool ClassLinker::ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& kl
super_mh.ChangeMethod(klass->GetSuperClass()->GetVTable()->GetWithoutChecks(i));
bool is_override = mh.GetMethod() != super_mh.GetMethod();
if (is_override && !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
- ThrowLinkageError(klass.get(), "Class %s method %s resolves differently in superclass %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in superclass %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyMethod(mh.GetMethod()).c_str(),
PrettyDescriptor(klass->GetSuperClass()).c_str());
return false;
@@ -3309,8 +3335,8 @@ bool ClassLinker::ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& kl
super_mh.ChangeMethod(klass->GetIfTable()->GetInterface(i)->GetVirtualMethod(j));
bool is_override = mh.GetMethod() != super_mh.GetMethod();
if (is_override && !mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
- ThrowLinkageError(klass.get(), "Class %s method %s resolves differently in interface %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in interface %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyMethod(mh.GetMethod()).c_str(),
PrettyDescriptor(klass->GetIfTable()->GetInterface(i)).c_str());
return false;
@@ -3321,9 +3347,9 @@ bool ClassLinker::ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& kl
return true;
}
-bool ClassLinker::EnsureInitialized(const SirtRef<mirror::Class>& c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(const Handle<mirror::Class>& c, bool can_init_fields,
bool can_init_parents) {
- DCHECK(c.get() != NULL);
+ DCHECK(c.Get() != NULL);
if (c->IsInitialized()) {
return true;
}
@@ -3331,7 +3357,7 @@ bool ClassLinker::EnsureInitialized(const SirtRef<mirror::Class>& c, bool can_in
bool success = InitializeClass(c, can_init_fields, can_init_parents);
if (!success) {
if (can_init_fields && can_init_parents) {
- CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.get());
+ CHECK(Thread::Current()->IsExceptionPending()) << PrettyClass(c.Get());
}
}
return success;
@@ -3342,17 +3368,17 @@ void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::Clas
SafeMap<uint32_t, mirror::ArtField*>& field_map) {
const byte* class_data = dex_file.GetClassData(dex_class_def);
ClassDataItemIterator it(dex_file, class_data);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, c->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, c->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(c->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(c->GetClassLoader()));
CHECK(!kMovingFields);
for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
field_map.Put(i, ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, true));
}
}
-bool ClassLinker::LinkClass(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkClass(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
if (!LinkSuperClass(klass)) {
return false;
@@ -3373,22 +3399,22 @@ bool ClassLinker::LinkClass(Thread* self, const SirtRef<mirror::Class>& klass,
return true;
}
-bool ClassLinker::LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass,
+bool ClassLinker::LoadSuperAndInterfaces(const Handle<mirror::Class>& klass,
const DexFile& dex_file) {
CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
uint16_t super_class_idx = class_def.superclass_idx_;
if (super_class_idx != DexFile::kDexNoIndex16) {
- mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.get());
+ mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.Get());
if (super_class == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
}
// Verify
if (!klass->CanAccess(super_class)) {
- ThrowIllegalAccessError(klass.get(), "Class %s extended by class %s is inaccessible",
+ ThrowIllegalAccessError(klass.Get(), "Class %s extended by class %s is inaccessible",
PrettyDescriptor(super_class).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
klass->SetSuperClass(super_class);
@@ -3397,7 +3423,7 @@ bool ClassLinker::LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass,
if (interfaces != NULL) {
for (size_t i = 0; i < interfaces->Size(); i++) {
uint16_t idx = interfaces->GetTypeItem(i).type_idx_;
- mirror::Class* interface = ResolveType(dex_file, idx, klass.get());
+ mirror::Class* interface = ResolveType(dex_file, idx, klass.Get());
if (interface == NULL) {
DCHECK(Thread::Current()->IsExceptionPending());
return false;
@@ -3405,9 +3431,9 @@ bool ClassLinker::LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass,
// Verify
if (!klass->CanAccess(interface)) {
// TODO: the RI seemed to ignore this in my testing.
- ThrowIllegalAccessError(klass.get(), "Interface %s implemented by class %s is inaccessible",
+ ThrowIllegalAccessError(klass.Get(), "Interface %s implemented by class %s is inaccessible",
PrettyDescriptor(interface).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
}
@@ -3417,33 +3443,33 @@ bool ClassLinker::LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass,
return true;
}
-bool ClassLinker::LinkSuperClass(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::LinkSuperClass(const Handle<mirror::Class>& klass) {
CHECK(!klass->IsPrimitive());
mirror::Class* super = klass->GetSuperClass();
- if (klass.get() == GetClassRoot(kJavaLangObject)) {
+ if (klass.Get() == GetClassRoot(kJavaLangObject)) {
if (super != NULL) {
- ThrowClassFormatError(klass.get(), "java.lang.Object must not have a superclass");
+ ThrowClassFormatError(klass.Get(), "java.lang.Object must not have a superclass");
return false;
}
return true;
}
if (super == NULL) {
- ThrowLinkageError(klass.get(), "No superclass defined for class %s",
- PrettyDescriptor(klass.get()).c_str());
+ ThrowLinkageError(klass.Get(), "No superclass defined for class %s",
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
// Verify
if (super->IsFinal() || super->IsInterface()) {
- ThrowIncompatibleClassChangeError(klass.get(), "Superclass %s of %s is %s",
+ ThrowIncompatibleClassChangeError(klass.Get(), "Superclass %s of %s is %s",
PrettyDescriptor(super).c_str(),
- PrettyDescriptor(klass.get()).c_str(),
+ PrettyDescriptor(klass.Get()).c_str(),
super->IsFinal() ? "declared final" : "an interface");
return false;
}
if (!klass->CanAccess(super)) {
- ThrowIllegalAccessError(klass.get(), "Superclass %s is inaccessible to class %s",
+ ThrowIllegalAccessError(klass.Get(), "Superclass %s is inaccessible to class %s",
PrettyDescriptor(super).c_str(),
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
@@ -3459,9 +3485,9 @@ bool ClassLinker::LinkSuperClass(const SirtRef<mirror::Class>& klass) {
}
// Disallow custom direct subclasses of java.lang.ref.Reference.
if (init_done_ && super == GetClassRoot(kJavaLangRefReference)) {
- ThrowLinkageError(klass.get(),
+ ThrowLinkageError(klass.Get(),
"Class %s attempts to subclass java.lang.ref.Reference, which is not allowed",
- PrettyDescriptor(klass.get()).c_str());
+ PrettyDescriptor(klass.Get()).c_str());
return false;
}
@@ -3476,13 +3502,13 @@ bool ClassLinker::LinkSuperClass(const SirtRef<mirror::Class>& klass) {
}
// Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
if (klass->IsInterface()) {
// No vtable.
size_t count = klass->NumVirtualMethods();
if (!IsUint(16, count)) {
- ThrowClassFormatError(klass.get(), "Too many methods on interface: %zd", count);
+ ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
return false;
}
for (size_t i = 0; i < count; ++i) {
@@ -3497,16 +3523,17 @@ bool ClassLinker::LinkMethods(const SirtRef<mirror::Class>& klass,
return true;
}
-bool ClassLinker::LinkVirtualMethods(const SirtRef<mirror::Class>& klass) {
+bool ClassLinker::LinkVirtualMethods(const Handle<mirror::Class>& klass) {
Thread* self = Thread::Current();
if (klass->HasSuperClass()) {
uint32_t max_count = klass->NumVirtualMethods() + klass->GetSuperClass()->GetVTable()->GetLength();
size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength();
CHECK_LE(actual_count, max_count);
// TODO: do not assign to the vtable field until it is fully constructed.
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
+ hs.NewHandle(klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count)));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3521,7 +3548,7 @@ bool ClassLinker::LinkVirtualMethods(const SirtRef<mirror::Class>& klass) {
if (local_mh.HasSameNameAndSignature(&super_mh)) {
if (klass->CanAccessMember(super_method->GetDeclaringClass(), super_method->GetAccessFlags())) {
if (super_method->IsFinal()) {
- ThrowLinkageError(klass.get(), "Method %s overrides final method in class %s",
+ ThrowLinkageError(klass.Get(), "Method %s overrides final method in class %s",
PrettyMethod(local_method).c_str(),
super_mh.GetDeclaringClassDescriptor());
return false;
@@ -3544,29 +3571,30 @@ bool ClassLinker::LinkVirtualMethods(const SirtRef<mirror::Class>& klass) {
}
}
if (!IsUint(16, actual_count)) {
- ThrowClassFormatError(klass.get(), "Too many methods defined on class: %zd", actual_count);
+ ThrowClassFormatError(klass.Get(), "Too many methods defined on class: %zd", actual_count);
return false;
}
// Shrink vtable if possible
CHECK_LE(actual_count, max_count);
if (actual_count < max_count) {
- vtable.reset(vtable->CopyOf(self, actual_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ vtable.Assign(vtable->CopyOf(self, actual_count));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
}
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
} else {
- CHECK(klass.get() == GetClassRoot(kJavaLangObject));
+ CHECK(klass.Get() == GetClassRoot(kJavaLangObject));
uint32_t num_virtual_methods = klass->NumVirtualMethods();
if (!IsUint(16, num_virtual_methods)) {
- ThrowClassFormatError(klass.get(), "Too many methods: %d", num_virtual_methods);
+ ThrowClassFormatError(klass.Get(), "Too many methods: %d", num_virtual_methods);
return false;
}
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, AllocArtMethodArray(self, num_virtual_methods));
- if (UNLIKELY(vtable.get() == NULL)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ vtable(hs.NewHandle(AllocArtMethodArray(self, num_virtual_methods)));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3575,13 +3603,13 @@ bool ClassLinker::LinkVirtualMethods(const SirtRef<mirror::Class>& klass) {
vtable->Set<false>(i, virtual_method);
virtual_method->SetMethodIndex(i & 0xFFFF);
}
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
}
return true;
}
-bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces) {
+bool ClassLinker::LinkInterfaceMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces) {
// Set the imt table to be all conflicts by default.
klass->SetImTable(Runtime::Current()->GetDefaultImt());
size_t super_ifcount;
@@ -3593,13 +3621,13 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
size_t ifcount = super_ifcount;
uint32_t num_interfaces;
{
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
num_interfaces =
- interfaces.get() == nullptr ? kh.NumDirectInterfaces() : interfaces->GetLength();
+ interfaces.Get() == nullptr ? kh.NumDirectInterfaces() : interfaces->GetLength();
ifcount += num_interfaces;
for (size_t i = 0; i < num_interfaces; i++) {
mirror::Class* interface =
- interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ interfaces.Get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
ifcount += interface->GetIfTableCount();
}
}
@@ -3626,8 +3654,9 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
}
}
Thread* self = Thread::Current();
- SirtRef<mirror::IfTable> iftable(self, AllocIfTable(self, ifcount));
- if (UNLIKELY(iftable.get() == NULL)) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
+ if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3641,14 +3670,14 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
// Flatten the interface inheritance hierarchy.
size_t idx = super_ifcount;
for (size_t i = 0; i < num_interfaces; i++) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
mirror::Class* interface =
- interfaces.get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
+ interfaces.Get() == nullptr ? kh.GetDirectInterface(i) : interfaces->Get(i);
DCHECK(interface != NULL);
if (!interface->IsInterface()) {
ClassHelper ih(interface);
- ThrowIncompatibleClassChangeError(klass.get(), "Class %s implements non-interface class %s",
- PrettyDescriptor(klass.get()).c_str(),
+ ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
+ PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(ih.GetDescriptor()).c_str());
return false;
}
@@ -3683,8 +3712,8 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
}
// Shrink iftable in case duplicates were found
if (idx < ifcount) {
- iftable.reset(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
- if (UNLIKELY(iftable.get() == NULL)) {
+ iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
+ if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3692,7 +3721,7 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
} else {
CHECK_EQ(idx, ifcount);
}
- klass->SetIfTable(iftable.get());
+ klass->SetIfTable(iftable.Get());
// If we're an interface, we don't need the vtable pointers, so we're done.
if (klass->IsInterface()) {
@@ -3700,8 +3729,9 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
}
// Allocate imtable
bool imtable_changed = false;
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, AllocArtMethodArray(self, kImtSize));
- if (UNLIKELY(imtable.get() == NULL)) {
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > imtable(
+ hs.NewHandle(AllocArtMethodArray(self, kImtSize)));
+ if (UNLIKELY(imtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3709,15 +3739,16 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
for (size_t i = 0; i < ifcount; ++i) {
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- method_array(self, AllocArtMethodArray(self, num_methods));
- if (UNLIKELY(method_array.get() == nullptr)) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> >
+ method_array(hs.NewHandle(AllocArtMethodArray(self, num_methods)));
+ if (UNLIKELY(method_array.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- iftable->SetMethodArray(i, method_array.get());
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > vtable(self,
- klass->GetVTableDuringLinking());
+ iftable->SetMethodArray(i, method_array.Get());
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > vtable(
+ hs.NewHandle(klass->GetVTableDuringLinking()));
for (size_t j = 0; j < num_methods; ++j) {
mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
MethodHelper interface_mh(interface_method);
@@ -3735,7 +3766,7 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
MethodHelper vtable_mh(vtable_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
- ThrowIllegalAccessError(klass.get(),
+ ThrowIllegalAccessError(klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(),
PrettyMethod(interface_method).c_str());
@@ -3754,26 +3785,27 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
}
}
if (k < 0) {
- SirtRef<mirror::ArtMethod> miranda_method(self, NULL);
+ StackHandleScope<1> hs(self);
+ auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
for (size_t mir = 0; mir < miranda_list.size(); mir++) {
mirror::ArtMethod* mir_method = miranda_list[mir];
MethodHelper vtable_mh(mir_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- miranda_method.reset(miranda_list[mir]);
+ miranda_method.Assign(miranda_list[mir]);
break;
}
}
- if (miranda_method.get() == NULL) {
+ if (miranda_method.Get() == NULL) {
// Point the interface table at a phantom slot.
- miranda_method.reset(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
- if (UNLIKELY(miranda_method.get() == NULL)) {
+ miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
+ if (UNLIKELY(miranda_method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
// TODO: If a methods move then the miranda_list may hold stale references.
- miranda_list.push_back(miranda_method.get());
+ miranda_list.push_back(miranda_method.Get());
}
- method_array->Set<false>(j, miranda_method.get());
+ method_array->Set<false>(j, miranda_method.Get());
}
}
}
@@ -3786,7 +3818,7 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
imtable->Set<false>(i, imt_conflict_method);
}
}
- klass->SetImTable(imtable.get());
+ klass->SetImTable(imtable.Get());
}
if (!miranda_list.empty()) {
int old_method_count = klass->NumVirtualMethods();
@@ -3803,13 +3835,14 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
}
klass->SetVirtualMethods(virtuals);
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> >
- vtable(self, klass->GetVTableDuringLinking());
- CHECK(vtable.get() != NULL);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod> > vtable(
+ hs.NewHandle(klass->GetVTableDuringLinking()));
+ CHECK(vtable.Get() != NULL);
int old_vtable_count = vtable->GetLength();
int new_vtable_count = old_vtable_count + miranda_list.size();
- vtable.reset(vtable->CopyOf(self, new_vtable_count));
- if (UNLIKELY(vtable.get() == NULL)) {
+ vtable.Assign(vtable->CopyOf(self, new_vtable_count));
+ if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
@@ -3822,7 +3855,7 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
vtable->Set<false>(old_vtable_count + i, method);
}
// TODO: do not assign to the vtable field until it is fully constructed.
- klass->SetVTable(vtable.get());
+ klass->SetVTable(vtable.Get());
}
mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
@@ -3835,13 +3868,13 @@ bool ClassLinker::LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
return true;
}
-bool ClassLinker::LinkInstanceFields(const SirtRef<mirror::Class>& klass) {
- CHECK(klass.get() != NULL);
+bool ClassLinker::LinkInstanceFields(const Handle<mirror::Class>& klass) {
+ CHECK(klass.Get() != NULL);
return LinkFields(klass, false);
}
-bool ClassLinker::LinkStaticFields(const SirtRef<mirror::Class>& klass) {
- CHECK(klass.get() != NULL);
+bool ClassLinker::LinkStaticFields(const Handle<mirror::Class>& klass) {
+ CHECK(klass.Get() != NULL);
size_t allocated_class_size = klass->GetClassSize();
bool success = LinkFields(klass, true);
CHECK_EQ(allocated_class_size, klass->GetClassSize());
@@ -3877,7 +3910,7 @@ struct LinkFieldsComparator {
}
};
-bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static) {
+bool ClassLinker::LinkFields(const Handle<mirror::Class>& klass, bool is_static) {
size_t num_fields =
is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
@@ -3972,7 +4005,7 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
// We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
if (!is_static &&
- (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)) {
+ (strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.Get()).GetDescriptor()) == 0)) {
// We know there are no non-reference fields in the Reference classes, and we know
// that 'referent' is alphabetically last, so this is easy...
CHECK_EQ(num_reference_fields, num_fields);
@@ -3989,7 +4022,7 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
mirror::ArtField* field = fields->Get(i);
if (false) { // enable to debug field layout
LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
- << " class=" << PrettyClass(klass.get())
+ << " class=" << PrettyClass(klass.Get())
<< " field=" << PrettyField(field)
<< " offset="
<< field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()));
@@ -3997,7 +4030,7 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
bool is_primitive = type != Primitive::kPrimNot;
- if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.get()).GetDescriptor()) == 0)
+ if ((strcmp("Ljava/lang/ref/Reference;", ClassHelper(klass.Get()).GetDescriptor()) == 0)
&& (strcmp("referent", fh.GetName()) == 0)) {
is_primitive = true; // We lied above, so we have to expect a lie here.
}
@@ -4022,7 +4055,7 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
} else {
klass->SetNumReferenceInstanceFields(num_reference_fields);
if (!klass->IsVariableSize()) {
- DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.get()).GetDescriptor();
+ DCHECK_GE(size, sizeof(mirror::Object)) << ClassHelper(klass.Get()).GetDescriptor();
size_t previous_size = klass->GetObjectSize();
if (previous_size != 0) {
// Make sure that we didn't originally have an incorrect size.
@@ -4036,7 +4069,7 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
// Set the bitmap of reference offsets, refOffsets, from the ifields
// list.
-void ClassLinker::CreateReferenceInstanceOffsets(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass) {
uint32_t reference_offsets = 0;
mirror::Class* super_class = klass->GetSuperClass();
if (super_class != NULL) {
@@ -4050,11 +4083,11 @@ void ClassLinker::CreateReferenceInstanceOffsets(const SirtRef<mirror::Class>& k
CreateReferenceOffsets(klass, false, reference_offsets);
}
-void ClassLinker::CreateReferenceStaticOffsets(const SirtRef<mirror::Class>& klass) {
+void ClassLinker::CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass) {
CreateReferenceOffsets(klass, true, 0);
}
-void ClassLinker::CreateReferenceOffsets(const SirtRef<mirror::Class>& klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets) {
size_t num_reference_fields =
is_static ? klass->NumReferenceStaticFieldsDuringLinking()
@@ -4087,8 +4120,8 @@ void ClassLinker::CreateReferenceOffsets(const SirtRef<mirror::Class>& klass, bo
}
mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
- const SirtRef<mirror::DexCache>& dex_cache) {
- DCHECK(dex_cache.get() != nullptr);
+ const Handle<mirror::DexCache>& dex_cache) {
+ DCHECK(dex_cache.Get() != nullptr);
mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != NULL) {
return resolved;
@@ -4102,16 +4135,16 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t str
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
mirror::Class* referrer) {
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, referrer->GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, referrer->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
return ResolveType(dex_file, type_idx, dex_cache, class_loader);
}
mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader) {
- DCHECK(dex_cache.get() != NULL);
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.Get() != NULL);
mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == NULL) {
Thread* self = Thread::Current();
@@ -4126,12 +4159,13 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
CHECK(self->IsExceptionPending())
<< "Expected pending exception for failed resolution of: " << descriptor;
// Convert a ClassNotFoundException to a NoClassDefFoundError.
- SirtRef<mirror::Throwable> cause(self, self->GetException(NULL));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
- DCHECK(resolved == NULL); // No SirtRef needed to preserve resolved.
+ DCHECK(resolved == NULL); // No Handle needed to preserve resolved.
self->ClearException();
ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
- self->GetException(NULL)->SetCause(cause.get());
+ self->GetException(NULL)->SetCause(cause.Get());
}
}
}
@@ -4142,11 +4176,11 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
mirror::ArtMethod* referrer,
InvokeType type) {
- DCHECK(dex_cache.get() != NULL);
+ DCHECK(dex_cache.Get() != NULL);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != NULL && !resolved->IsRuntimeMethod()) {
@@ -4164,15 +4198,15 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
switch (type) {
case kDirect: // Fall-through.
case kStatic:
- resolved = klass->FindDirectMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx);
break;
case kInterface:
- resolved = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
- resolved = klass->FindVirtualMethod(dex_cache.get(), method_idx);
+ resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
@@ -4288,10 +4322,10 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file,
}
mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
bool is_static) {
- DCHECK(dex_cache.get() != nullptr);
+ DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
@@ -4304,9 +4338,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
}
if (is_static) {
- resolved = klass->FindStaticField(dex_cache.get(), field_idx);
+ resolved = klass->FindStaticField(dex_cache.Get(), field_idx);
} else {
- resolved = klass->FindInstanceField(dex_cache.get(), field_idx);
+ resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
if (resolved == NULL) {
@@ -4328,9 +4362,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader) {
- DCHECK(dex_cache.get() != nullptr);
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader) {
+ DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
if (resolved != NULL) {
return resolved;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 2c6873e..3dac6e5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -47,7 +47,7 @@ namespace mirror {
class InternTable;
template<class T> class ObjectLock;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
typedef bool (ClassVisitor)(mirror::Class* c, void* arg);
@@ -75,7 +75,7 @@ class ClassLinker {
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
mirror::Class* FindClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
@@ -92,7 +92,7 @@ class ClassLinker {
// Define a new a class based on a ClassDef from a DexFile
mirror::Class* DefineClass(const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -136,7 +136,7 @@ class ClassLinker {
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
- const SirtRef<mirror::DexCache>& dex_cache)
+ const Handle<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
@@ -159,8 +159,8 @@ class ClassLinker {
// type, since it may be referenced from but not contained within
// the given DexFile.
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a method with a given ID from the DexFile, storing the
@@ -170,8 +170,8 @@ class ClassLinker {
// virtual method.
mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
uint32_t method_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
mirror::ArtMethod* referrer,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -191,8 +191,8 @@ class ClassLinker {
// field.
mirror::ArtField* ResolveField(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -202,8 +202,8 @@ class ClassLinker {
// field resolution semantics are followed.
mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file,
uint32_t field_idx,
- const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get shorty from method index without resolution. Used to do handlerization.
@@ -213,7 +213,7 @@ class ClassLinker {
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
// given the restriction that no <clinit> execution is possible.
- bool EnsureInitialized(const SirtRef<mirror::Class>& c,
+ bool EnsureInitialized(const Handle<mirror::Class>& c,
bool can_init_fields, bool can_init_parents)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -224,7 +224,7 @@ class ClassLinker {
void RegisterDexFile(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFile(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void RegisterDexFile(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -315,12 +315,12 @@ class ClassLinker {
size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void VerifyClass(const SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void VerifyClass(const Handle<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
mirror::Class::Status& oat_file_class_status)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
- const SirtRef<mirror::Class>& klass)
+ const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -418,12 +418,12 @@ class ClassLinker {
mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
- const SirtRef<mirror::ClassLoader>& class_loader)
+ const Handle<mirror::ClassLoader>& class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AppendToBootClassPath(const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AppendToBootClassPath(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void AppendToBootClassPath(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
@@ -435,23 +435,23 @@ class ClassLinker {
void LoadClass(const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(const DexFile& dex_file,
const byte* class_data,
- const SirtRef<mirror::Class>& klass,
+ const Handle<mirror::Class>& klass,
mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
- const SirtRef<mirror::Class>& klass, const SirtRef<mirror::ArtField>& dst)
+ const Handle<mirror::Class>& klass, const Handle<mirror::ArtField>& dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
const ClassDataItemIterator& dex_method,
- const SirtRef<mirror::Class>& klass)
+ const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -460,23 +460,23 @@ class ClassLinker {
OatFile::OatClass GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void RegisterDexFileLocked(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
+ void RegisterDexFileLocked(const DexFile& dex_file, const Handle<mirror::DexCache>& dex_cache)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegisteredLocked(const DexFile& dex_file) const
SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
- bool InitializeClass(const SirtRef<mirror::Class>& klass, bool can_run_clinit,
+ bool InitializeClass(const Handle<mirror::Class>& klass, bool can_run_clinit,
bool can_init_parents)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool WaitForInitializeClass(const SirtRef<mirror::Class>& klass, Thread* self,
+ bool WaitForInitializeClass(const Handle<mirror::Class>& klass, Thread* self,
ObjectLock<mirror::Class>& lock);
- bool ValidateSuperClassDescriptors(const SirtRef<mirror::Class>& klass)
+ bool ValidateSuperClassDescriptors(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
- SirtRef<mirror::ClassLoader>& class_loader1,
- SirtRef<mirror::ClassLoader>& class_loader2)
+ Handle<mirror::ClassLoader>& class_loader1,
+ Handle<mirror::ClassLoader>& class_loader2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
@@ -484,40 +484,40 @@ class ClassLinker {
mirror::Class* klass2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkClass(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkClass(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkSuperClass(const SirtRef<mirror::Class>& klass)
+ bool LinkSuperClass(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LoadSuperAndInterfaces(const SirtRef<mirror::Class>& klass, const DexFile& dex_file)
+ bool LoadSuperAndInterfaces(const Handle<mirror::Class>& klass, const DexFile& dex_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkVirtualMethods(const SirtRef<mirror::Class>& klass)
+ bool LinkVirtualMethods(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInterfaceMethods(const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ObjectArray<mirror::Class> >& interfaces)
+ bool LinkInterfaceMethods(const Handle<mirror::Class>& klass,
+ const Handle<mirror::ObjectArray<mirror::Class> >& interfaces)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkStaticFields(const SirtRef<mirror::Class>& klass)
+ bool LinkStaticFields(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkInstanceFields(const SirtRef<mirror::Class>& klass)
+ bool LinkInstanceFields(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool LinkFields(const SirtRef<mirror::Class>& klass, bool is_static)
+ bool LinkFields(const Handle<mirror::Class>& klass, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceInstanceOffsets(const SirtRef<mirror::Class>& klass)
+ void CreateReferenceInstanceOffsets(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceStaticOffsets(const SirtRef<mirror::Class>& klass)
+ void CreateReferenceStaticOffsets(const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void CreateReferenceOffsets(const SirtRef<mirror::Class>& klass, bool is_static,
+ void CreateReferenceOffsets(const Handle<mirror::Class>& klass, bool is_static,
uint32_t reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -546,11 +546,11 @@ class ClassLinker {
bool* open_failed)
LOCKS_EXCLUDED(dex_lock_);
- mirror::ArtMethod* CreateProxyConstructor(Thread* self, const SirtRef<mirror::Class>& klass,
+ mirror::ArtMethod* CreateProxyConstructor(Thread* self, const Handle<mirror::Class>& klass,
mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* CreateProxyMethod(Thread* self, const SirtRef<mirror::Class>& klass,
- const SirtRef<mirror::ArtMethod>& prototype)
+ mirror::ArtMethod* CreateProxyMethod(Thread* self, const Handle<mirror::Class>& klass,
+ const Handle<mirror::ArtMethod>& prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b68ab4a..0db08aa 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -34,7 +34,7 @@
#include "mirror/proxy.h"
#include "mirror/reference.h"
#include "mirror/stack_trace_element.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -99,9 +99,10 @@ class ClassLinkerTest : public CommonRuntimeTest {
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
- SirtRef<mirror::Class> array(self,
- class_linker_->FindClass(self, array_descriptor.c_str(), loader));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
+ Handle<mirror::Class> array(
+ hs.NewHandle(class_linker_->FindClass(self, array_descriptor.c_str(), loader)));
ClassHelper array_component_ch(array->GetComponentType());
EXPECT_STREQ(component_type.c_str(), array_component_ch.GetDescriptor());
EXPECT_EQ(class_loader, array->GetClassLoader());
@@ -109,10 +110,10 @@ class ClassLinkerTest : public CommonRuntimeTest {
AssertArrayClass(array_descriptor, array);
}
- void AssertArrayClass(const std::string& array_descriptor, const SirtRef<mirror::Class>& array)
+ void AssertArrayClass(const std::string& array_descriptor, const Handle<mirror::Class>& array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassHelper kh(array.get());
- ASSERT_TRUE(array.get() != NULL);
+ ClassHelper kh(array.Get());
+ ASSERT_TRUE(array.Get() != NULL);
ASSERT_TRUE(array->GetClass() != NULL);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
EXPECT_TRUE(array->GetClass()->GetSuperClass() != NULL);
@@ -141,17 +142,17 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, array->NumVirtualMethods());
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
- kh.ChangeClass(array.get());
+ kh.ChangeClass(array.Get());
EXPECT_EQ(2U, kh.NumDirectInterfaces());
EXPECT_TRUE(array->GetVTable() != NULL);
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != NULL);
kh.ChangeClass(kh.GetDirectInterface(0));
EXPECT_STREQ(kh.GetDescriptor(), "Ljava/lang/Cloneable;");
- kh.ChangeClass(array.get());
+ kh.ChangeClass(array.Get());
kh.ChangeClass(kh.GetDirectInterface(1));
EXPECT_STREQ(kh.GetDescriptor(), "Ljava/io/Serializable;");
- EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.get());
+ EXPECT_EQ(class_linker_->FindArrayClass(self, array->GetComponentType()), array.Get());
}
void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -182,9 +183,9 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_TRUE(fh.GetType() != NULL);
}
- void AssertClass(const std::string& descriptor, const SirtRef<mirror::Class>& klass)
+ void AssertClass(const std::string& descriptor, const Handle<mirror::Class>& klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ClassHelper kh(klass.get());
+ ClassHelper kh(klass.Get());
EXPECT_STREQ(descriptor.c_str(), kh.GetDescriptor());
if (descriptor == "Ljava/lang/Object;") {
EXPECT_FALSE(klass->HasSuperClass());
@@ -200,7 +201,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_FALSE(klass->IsErroneous());
EXPECT_FALSE(klass->IsArrayClass());
EXPECT_TRUE(klass->GetComponentType() == NULL);
- EXPECT_TRUE(klass->IsInSamePackage(klass.get()));
+ EXPECT_TRUE(klass->IsInSamePackage(klass.Get()));
EXPECT_TRUE(mirror::Class::IsInSamePackage(kh.GetDescriptor(), kh.GetDescriptor()));
if (klass->IsInterface()) {
EXPECT_TRUE(klass->IsAbstract());
@@ -242,31 +243,31 @@ class ClassLinkerTest : public CommonRuntimeTest {
}
EXPECT_FALSE(klass->IsPrimitive());
- EXPECT_TRUE(klass->CanAccess(klass.get()));
+ EXPECT_TRUE(klass->CanAccess(klass.Get()));
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
mirror::ArtMethod* method = klass->GetDirectMethod(i);
AssertMethod(method);
EXPECT_TRUE(method->IsDirect());
- EXPECT_EQ(klass.get(), method->GetDeclaringClass());
+ EXPECT_EQ(klass.Get(), method->GetDeclaringClass());
}
for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
mirror::ArtMethod* method = klass->GetVirtualMethod(i);
AssertMethod(method);
EXPECT_FALSE(method->IsDirect());
- EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.get()));
+ EXPECT_TRUE(method->GetDeclaringClass()->IsAssignableFrom(klass.Get()));
}
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
mirror::ArtField* field = klass->GetInstanceField(i);
- AssertField(klass.get(), field);
+ AssertField(klass.Get(), field);
EXPECT_FALSE(field->IsStatic());
}
for (size_t i = 0; i < klass->NumStaticFields(); i++) {
mirror::ArtField* field = klass->GetStaticField(i);
- AssertField(klass.get(), field);
+ AssertField(klass.Get(), field);
EXPECT_TRUE(field->IsStatic());
}
@@ -294,7 +295,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
}
size_t total_num_reference_instance_fields = 0;
- mirror::Class* k = klass.get();
+ mirror::Class* k = klass.Get();
while (k != NULL) {
total_num_reference_instance_fields += k->NumReferenceInstanceFields();
k = k->GetSuperClass();
@@ -306,12 +307,14 @@ class ClassLinkerTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != NULL);
Thread* self = Thread::Current();
- SirtRef<mirror::Class> klass(self, class_linker_->FindSystemClass(self, descriptor.c_str()));
- ASSERT_TRUE(klass.get() != nullptr);
- EXPECT_STREQ(descriptor.c_str(), ClassHelper(klass.get()).GetDescriptor());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker_->FindSystemClass(self, descriptor.c_str())));
+ ASSERT_TRUE(klass.Get() != nullptr);
+ EXPECT_STREQ(descriptor.c_str(), ClassHelper(klass.Get()).GetDescriptor());
EXPECT_EQ(class_loader, klass->GetClassLoader());
if (klass->IsPrimitive()) {
- AssertPrimitiveClass(descriptor, klass.get());
+ AssertPrimitiveClass(descriptor, klass.Get());
} else if (klass->IsArrayClass()) {
AssertArrayClass(descriptor, klass);
} else {
@@ -671,7 +674,9 @@ TEST_F(ClassLinkerTest, FindClassNonexistent) {
TEST_F(ClassLinkerTest, FindClassNested) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Nested")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Nested"))));
mirror::Class* outer = class_linker_->FindClass(soa.Self(), "LNested;", class_loader);
ASSERT_TRUE(outer != NULL);
@@ -745,7 +750,9 @@ TEST_F(ClassLinkerTest, FindClass) {
EXPECT_EQ(0U, JavaLangObject->NumStaticFields());
EXPECT_EQ(0U, kh.NumDirectInterfaces());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
AssertNonExistentClass("LMyClass;");
mirror::Class* MyClass = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader);
kh.ChangeClass(MyClass);
@@ -756,7 +763,7 @@ TEST_F(ClassLinkerTest, FindClass) {
ASSERT_STREQ(kh.GetDescriptor(), "LMyClass;");
EXPECT_TRUE(MyClass->GetSuperClass() == JavaLangObject);
EXPECT_TRUE(MyClass->HasSuperClass());
- EXPECT_EQ(class_loader.get(), MyClass->GetClassLoader());
+ EXPECT_EQ(class_loader.Get(), MyClass->GetClassLoader());
EXPECT_EQ(mirror::Class::kStatusResolved, MyClass->GetStatus());
EXPECT_FALSE(MyClass->IsErroneous());
EXPECT_TRUE(MyClass->IsLoaded());
@@ -784,7 +791,7 @@ TEST_F(ClassLinkerTest, FindClass) {
AssertArrayClass("[Ljava/lang/Object;", "Ljava/lang/Object;", NULL);
// synthesized on the fly
AssertArrayClass("[[C", "[C", NULL);
- AssertArrayClass("[[[LMyClass;", "[[LMyClass;", class_loader.get());
+ AssertArrayClass("[[[LMyClass;", "[[LMyClass;", class_loader.Get());
// or not available at all
AssertNonExistentClass("[[[[LNonExistentClass;");
}
@@ -813,27 +820,28 @@ TEST_F(ClassLinkerTest, ValidateObjectArrayElementsOffset) {
TEST_F(ClassLinkerTest, ValidatePrimitiveArrayElementsOffset) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::LongArray> long_array(soa.Self(), mirror::LongArray::Alloc(soa.Self(), 0));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::LongArray> long_array(hs.NewHandle(mirror::LongArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[J"), long_array->GetClass());
uintptr_t data_offset = reinterpret_cast<uintptr_t>(long_array->GetData());
EXPECT_TRUE(IsAligned<8>(data_offset)); // Longs require 8 byte alignment
- SirtRef<mirror::DoubleArray> double_array(soa.Self(), mirror::DoubleArray::Alloc(soa.Self(), 0));
+ Handle<mirror::DoubleArray> double_array(hs.NewHandle(mirror::DoubleArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[D"), double_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(double_array->GetData());
EXPECT_TRUE(IsAligned<8>(data_offset)); // Doubles require 8 byte alignment
- SirtRef<mirror::IntArray> int_array(soa.Self(), mirror::IntArray::Alloc(soa.Self(), 0));
+ Handle<mirror::IntArray> int_array(hs.NewHandle(mirror::IntArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[I"), int_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(int_array->GetData());
EXPECT_TRUE(IsAligned<4>(data_offset)); // Ints require 4 byte alignment
- SirtRef<mirror::CharArray> char_array(soa.Self(), mirror::CharArray::Alloc(soa.Self(), 0));
+ Handle<mirror::CharArray> char_array(hs.NewHandle(mirror::CharArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[C"), char_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(char_array->GetData());
EXPECT_TRUE(IsAligned<2>(data_offset)); // Chars require 2 byte alignment
- SirtRef<mirror::ShortArray> short_array(soa.Self(), mirror::ShortArray::Alloc(soa.Self(), 0));
+ Handle<mirror::ShortArray> short_array(hs.NewHandle(mirror::ShortArray::Alloc(soa.Self(), 0)));
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "[S"), short_array->GetClass());
data_offset = reinterpret_cast<uintptr_t>(short_array->GetData());
EXPECT_TRUE(IsAligned<2>(data_offset)); // Shorts require 2 byte alignment
@@ -845,7 +853,8 @@ TEST_F(ClassLinkerTest, ValidateBoxedTypes) {
// Validate that the "value" field is always the 0th field in each of java.lang's box classes.
// This lets UnboxPrimitive avoid searching for the field by name at runtime.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader);
FieldHelper fh(c->GetIFields()->Get(0));
@@ -875,21 +884,25 @@ TEST_F(ClassLinkerTest, ValidateBoxedTypes) {
TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader_1(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
- SirtRef<mirror::ClassLoader> class_loader_2(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass")));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader_1(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
+ Handle<mirror::ClassLoader> class_loader_2(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("MyClass"))));
mirror::Class* MyClass_1 = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader_1);
mirror::Class* MyClass_2 = class_linker_->FindClass(soa.Self(), "LMyClass;", class_loader_2);
- EXPECT_TRUE(MyClass_1 != NULL);
- EXPECT_TRUE(MyClass_2 != NULL);
+ EXPECT_TRUE(MyClass_1 != nullptr);
+ EXPECT_TRUE(MyClass_2 != nullptr);
EXPECT_NE(MyClass_1, MyClass_2);
}
TEST_F(ClassLinkerTest, StaticFields) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(LoadDex("Statics")));
- SirtRef<mirror::Class> statics(soa.Self(), class_linker_->FindClass(soa.Self(), "LStatics;",
- class_loader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
+ Handle<mirror::Class> statics(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
class_linker_->EnsureInitialized(statics, true, true);
// Static final primitives that are initialized by a compile-time constant
@@ -904,74 +917,76 @@ TEST_F(ClassLinkerTest, StaticFields) {
FieldHelper fh(s0);
EXPECT_STREQ(ClassHelper(s0->GetClass()).GetDescriptor(), "Ljava/lang/reflect/ArtField;");
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimBoolean);
- EXPECT_EQ(true, s0->GetBoolean(statics.get()));
- s0->SetBoolean<false>(statics.get(), false);
+ EXPECT_EQ(true, s0->GetBoolean(statics.Get()));
+ s0->SetBoolean<false>(statics.Get(), false);
mirror::ArtField* s1 = statics->FindStaticField("s1", "B");
fh.ChangeField(s1);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimByte);
- EXPECT_EQ(5, s1->GetByte(statics.get()));
- s1->SetByte<false>(statics.get(), 6);
+ EXPECT_EQ(5, s1->GetByte(statics.Get()));
+ s1->SetByte<false>(statics.Get(), 6);
mirror::ArtField* s2 = statics->FindStaticField("s2", "C");
fh.ChangeField(s2);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimChar);
- EXPECT_EQ('a', s2->GetChar(statics.get()));
- s2->SetChar<false>(statics.get(), 'b');
+ EXPECT_EQ('a', s2->GetChar(statics.Get()));
+ s2->SetChar<false>(statics.Get(), 'b');
mirror::ArtField* s3 = statics->FindStaticField("s3", "S");
fh.ChangeField(s3);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimShort);
- EXPECT_EQ(-536, s3->GetShort(statics.get()));
- s3->SetShort<false>(statics.get(), -535);
+ EXPECT_EQ(-536, s3->GetShort(statics.Get()));
+ s3->SetShort<false>(statics.Get(), -535);
mirror::ArtField* s4 = statics->FindStaticField("s4", "I");
fh.ChangeField(s4);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimInt);
- EXPECT_EQ(2000000000, s4->GetInt(statics.get()));
- s4->SetInt<false>(statics.get(), 2000000001);
+ EXPECT_EQ(2000000000, s4->GetInt(statics.Get()));
+ s4->SetInt<false>(statics.Get(), 2000000001);
mirror::ArtField* s5 = statics->FindStaticField("s5", "J");
fh.ChangeField(s5);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimLong);
- EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics.get()));
- s5->SetLong<false>(statics.get(), INT64_C(0x34567890abcdef12));
+ EXPECT_EQ(0x1234567890abcdefLL, s5->GetLong(statics.Get()));
+ s5->SetLong<false>(statics.Get(), INT64_C(0x34567890abcdef12));
mirror::ArtField* s6 = statics->FindStaticField("s6", "F");
fh.ChangeField(s6);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimFloat);
- EXPECT_EQ(0.5, s6->GetFloat(statics.get()));
- s6->SetFloat<false>(statics.get(), 0.75);
+ EXPECT_EQ(0.5, s6->GetFloat(statics.Get()));
+ s6->SetFloat<false>(statics.Get(), 0.75);
mirror::ArtField* s7 = statics->FindStaticField("s7", "D");
fh.ChangeField(s7);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimDouble);
- EXPECT_EQ(16777217, s7->GetDouble(statics.get()));
- s7->SetDouble<false>(statics.get(), 16777219);
+ EXPECT_EQ(16777217, s7->GetDouble(statics.Get()));
+ s7->SetDouble<false>(statics.Get(), 16777219);
mirror::ArtField* s8 = statics->FindStaticField("s8", "Ljava/lang/String;");
fh.ChangeField(s8);
EXPECT_TRUE(fh.GetTypeAsPrimitiveType() == Primitive::kPrimNot);
- EXPECT_TRUE(s8->GetObject(statics.get())->AsString()->Equals("android"));
+ EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("android"));
s8->SetObject<false>(s8->GetDeclaringClass(),
mirror::String::AllocFromModifiedUtf8(soa.Self(), "robot"));
// TODO: Remove EXPECT_FALSE when GCC can handle EXPECT_EQ
// http://code.google.com/p/googletest/issues/detail?id=322
- EXPECT_FALSE(s0->GetBoolean(statics.get()));
- EXPECT_EQ(6, s1->GetByte(statics.get()));
- EXPECT_EQ('b', s2->GetChar(statics.get()));
- EXPECT_EQ(-535, s3->GetShort(statics.get()));
- EXPECT_EQ(2000000001, s4->GetInt(statics.get()));
- EXPECT_EQ(INT64_C(0x34567890abcdef12), s5->GetLong(statics.get()));
- EXPECT_EQ(0.75, s6->GetFloat(statics.get()));
- EXPECT_EQ(16777219, s7->GetDouble(statics.get()));
- EXPECT_TRUE(s8->GetObject(statics.get())->AsString()->Equals("robot"));
+ EXPECT_FALSE(s0->GetBoolean(statics.Get()));
+ EXPECT_EQ(6, s1->GetByte(statics.Get()));
+ EXPECT_EQ('b', s2->GetChar(statics.Get()));
+ EXPECT_EQ(-535, s3->GetShort(statics.Get()));
+ EXPECT_EQ(2000000001, s4->GetInt(statics.Get()));
+ EXPECT_EQ(INT64_C(0x34567890abcdef12), s5->GetLong(statics.Get()));
+ EXPECT_EQ(0.75, s6->GetFloat(statics.Get()));
+ EXPECT_EQ(16777219, s7->GetDouble(statics.Get()));
+ EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("robot"));
}
TEST_F(ClassLinkerTest, Interfaces) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Interfaces")));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Interfaces"))));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
mirror::Class* K = class_linker_->FindClass(soa.Self(), "LInterfaces$K;", class_loader);
@@ -1032,7 +1047,9 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("StaticsFromCode");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(jclass_loader)[0];
CHECK(dex_file != NULL);
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 22a0e22..c91b014 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -28,6 +28,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
+#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -45,8 +46,7 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
+#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
@@ -2809,8 +2809,9 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
// should never be null. We could just check we never encounter this case.
return false;
}
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
&mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true);
@@ -3341,43 +3342,44 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- SirtRef<mirror::Object> old_throw_this_object(soa.Self(), NULL);
- SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), NULL);
- SirtRef<mirror::Throwable> old_exception(soa.Self(), NULL);
+ StackHandleScope<4> hs(soa.Self());
+ auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
+ auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
+ auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
uint32_t old_throw_dex_pc;
{
ThrowLocation old_throw_location;
mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
+ old_throw_this_object.Assign(old_throw_location.GetThis());
+ old_throw_method.Assign(old_throw_location.GetMethod());
+ old_exception.Assign(old_exception_obj);
old_throw_dex_pc = old_throw_location.GetDexPc();
soa.Self()->ClearException();
}
// Translate the method through the vtable, unless the debugger wants to suppress it.
- SirtRef<mirror::ArtMethod> m(soa.Self(), pReq->method);
+ Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
- mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.get());
- if (actual_method != m.get()) {
- VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.get()) << " to " << PrettyMethod(actual_method);
- m.reset(actual_method);
+ mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
+ if (actual_method != m.Get()) {
+ VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
+ m.Assign(actual_method);
}
}
- VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.get())
+ VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
<< " receiver=" << pReq->receiver
<< " arg_count=" << pReq->arg_count;
- CHECK(m.get() != nullptr);
+ CHECK(m.Get() != nullptr);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.get()),
+ pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
reinterpret_cast<jvalue*>(pReq->arg_values));
mirror::Throwable* exception = soa.Self()->GetException(NULL);
soa.Self()->ClearException();
pReq->exception = gRegistry->Add(exception);
- pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m.get()).GetShorty());
+ pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m.Get()).GetShorty());
if (pReq->exception != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
<< " " << exception->Dump();
@@ -3402,10 +3404,10 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
gRegistry->Add(pReq->result_value.GetL());
}
- if (old_exception.get() != NULL) {
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ if (old_exception.Get() != NULL) {
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
+ soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
}
}
@@ -3547,9 +3549,10 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
} else {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::String> name(soa.Self(), t->GetThreadName(soa));
- size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
- const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL;
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
+ size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
+ const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, t->GetThreadId());
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
index 3eb1792..c7fbc87 100644
--- a/runtime/deoptimize_stack_visitor.cc
+++ b/runtime/deoptimize_stack_visitor.cc
@@ -19,7 +19,7 @@
#include "mirror/art_method-inl.h"
#include "object_utils.h"
#include "quick_exception_handler.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -50,8 +50,9 @@ bool DeoptimizeStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
- SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
+ StackHandleScope<2> hs(self_);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
&mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 6adfc1f..f3d4621 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -37,7 +37,7 @@
#include "os.h"
#include "safe_map.h"
#include "ScopedFd.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "UniquePtr.h"
#include "utf-inl.h"
@@ -1005,8 +1005,8 @@ static uint64_t ReadUnsignedLong(const byte* ptr, int zwidth, bool fill_on_right
}
EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+ Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index c782ab1..cfa2555 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -42,8 +42,7 @@ namespace mirror {
} // namespace mirror
class ClassLinker;
class Signature;
-template <typename T>
-class SirtRef;
+template<class T> class Handle;
class StringPiece;
class ZipArchive;
@@ -1127,8 +1126,8 @@ class ClassDataItemIterator {
class EncodedStaticFieldValueIterator {
public:
- EncodedStaticFieldValueIterator(const DexFile& dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+ EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker, const DexFile::ClassDef& class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -1163,8 +1162,8 @@ class EncodedStaticFieldValueIterator {
static const byte kEncodedValueArgShift = 5;
const DexFile& dex_file_;
- SirtRef<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
- SirtRef<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types.
+ Handle<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects.
+ Handle<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types.
ClassLinker* linker_; // Linker to resolve literal objects.
size_t array_size_; // Size of array.
size_t pos_; // Current position.
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index e52a8fb..6998e21 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,7 +30,7 @@
#include "mirror/object-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
namespace art {
@@ -72,7 +72,8 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
}
}
if (UNLIKELY(!klass->IsInitialized())) {
- SirtRef<mirror::Class> sirt_klass(self, klass);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
// EnsureInitialized (the class initializer) might cause a GC.
// may cause us to suspend meaning that another thread may try to
// change the allocator while we are stuck in the entrypoints of
@@ -82,11 +83,11 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
// has changed and to null-check the return value in case the
// initialization fails.
*slow_path = true;
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
- return sirt_klass.get();
+ return h_klass.Get();
}
return klass;
}
@@ -96,7 +97,8 @@ ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(m
Thread* self, bool* slow_path)
NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(!klass->IsInitialized())) {
- SirtRef<mirror::Class> sirt_class(self, klass);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
// EnsureInitialized (the class initializer) might cause a GC.
// may cause us to suspend meaning that another thread may try to
// change the allocator while we are stuck in the entrypoints of
@@ -106,11 +108,11 @@ ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(m
// has changed and to null-check the return value in case the
// initialization fails.
*slow_path = true;
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_class, true, true)) {
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
- return sirt_class.get();
+ return h_class.Get();
}
return klass;
}
@@ -346,14 +348,14 @@ static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::Ar
if (LIKELY(fields_class->IsInitialized())) {
return resolved_field;
} else {
- SirtRef<mirror::Class> sirt_class(self, fields_class);
- if (LIKELY(class_linker->EnsureInitialized(sirt_class, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(fields_class));
+ if (LIKELY(class_linker->EnsureInitialized(h_class, true, true))) {
// Otherwise let's ensure the class is initialized before resolving the field.
return resolved_field;
- } else {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind
- return nullptr; // Failure.
}
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind
+ return nullptr; // Failure.
}
}
}
@@ -386,12 +388,13 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
mirror::Object* this_object,
mirror::ArtMethod* referrer, Thread* self) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Object> sirt_this(self, type == kStatic ? nullptr : this_object);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> handle_scope_this(hs.NewHandle(type == kStatic ? nullptr : this_object));
mirror::ArtMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type);
if (UNLIKELY(resolved_method == nullptr)) {
DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
return nullptr; // Failure.
- } else if (UNLIKELY(sirt_this.get() == nullptr && type != kStatic)) {
+ } else if (UNLIKELY(handle_scope_this.Get() == nullptr && type != kStatic)) {
// Maintain interpreter-like semantics where NullPointerException is thrown
// after potential NoSuchMethodError from class linker.
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
@@ -420,7 +423,7 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
case kDirect:
return resolved_method;
case kVirtual: {
- mirror::ObjectArray<mirror::ArtMethod>* vtable = sirt_this->GetClass()->GetVTable();
+ mirror::ObjectArray<mirror::ArtMethod>* vtable = handle_scope_this->GetClass()->GetVTable();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check &&
(vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
@@ -457,16 +460,16 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % ClassLinker::kImtSize;
- mirror::ObjectArray<mirror::ArtMethod>* imt_table = sirt_this->GetClass()->GetImTable();
+ mirror::ObjectArray<mirror::ArtMethod>* imt_table = handle_scope_this->GetClass()->GetImTable();
mirror::ArtMethod* imt_method = imt_table->Get(imt_index);
if (!imt_method->IsImtConflictMethod()) {
return imt_method;
} else {
mirror::ArtMethod* interface_method =
- sirt_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ handle_scope_this->GetClass()->FindVirtualMethodForInterface(resolved_method);
if (UNLIKELY(interface_method == nullptr)) {
ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
- sirt_this.get(), referrer);
+ handle_scope_this.Get(), referrer);
return nullptr; // Failure.
} else {
return interface_method;
@@ -625,12 +628,13 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
if (klass == referring_class && referrer->IsConstructor() && referrer->IsStatic()) {
return klass;
}
- SirtRef<mirror::Class> sirt_class(self, klass);
- if (!class_linker->EnsureInitialized(sirt_class, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ if (!class_linker->EnsureInitialized(h_class, true, true)) {
CHECK(self->IsExceptionPending());
return nullptr; // Failure - Indicate to caller to deliver exception
}
- return sirt_class.get();
+ return h_class.Get();
}
extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index a0ba6b9..3f02ec7 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -34,14 +34,15 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& m
mirror::Class* declaringClass = method->GetDeclaringClass();
if (UNLIKELY(!declaringClass->IsInitializing())) {
self->PushShadowFrame(shadow_frame);
- SirtRef<mirror::Class> sirt_c(self, declaringClass);
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true))) {
self->PopShadowFrame();
DCHECK(self->IsExceptionPending());
return;
}
self->PopShadowFrame();
- CHECK(sirt_c->IsInitializing());
+ CHECK(h_class->IsInitializing());
}
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index f1b15b5..17c3222 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -214,8 +214,9 @@ extern "C" uint64_t artPortableToInterpreterBridge(mirror::ArtMethod* method, Th
if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
// Ensure static method's class is initialized.
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending());
self->PopManagedStackFragment(fragment);
return 0;
@@ -396,7 +397,8 @@ extern "C" const void* artPortableResolutionTrampoline(mirror::ArtMethod* called
const void* code = nullptr;
if (LIKELY(!thread->IsExceptionPending())) {
// Ensure that the called method's class is initialized.
- SirtRef<mirror::Class> called_class(thread, called->GetDeclaringClass());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
code = called->GetEntryPointFromPortableCompiledCode();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 116957d..9c9cca8 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -65,7 +65,7 @@ static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
env->locals.SetSegmentState(env->local_ref_cookie);
env->local_ref_cookie = saved_local_ref_cookie;
- self->PopSirt();
+ self->PopHandleScope();
}
extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d2603f..887bd6f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -489,8 +489,9 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
// Ensure static method's class is initialized.
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
self->PopManagedStackFragment(fragment);
return 0;
@@ -755,9 +756,10 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
// Resolve method filling in dex cache.
if (called->IsRuntimeMethod()) {
- SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> handle_scope_receiver(hs.NewHandle(virtual_or_interface ? receiver : nullptr));
called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
- receiver = sirt_receiver.get();
+ receiver = handle_scope_receiver.Get();
}
const void* code = NULL;
if (LIKELY(!self->IsExceptionPending())) {
@@ -796,7 +798,8 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
}
}
// Ensure that the called method's class is initialized.
- SirtRef<mirror::Class> called_class(soa.Self(), called->GetDeclaringClass());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
linker->EnsureInitialized(called_class, true, true);
if (LIKELY(called_class->IsInitialized())) {
code = called->GetEntryPointFromQuickCompiledCode();
@@ -857,10 +860,10 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
*
* void PushStack(uintptr_t): Push a value to the stack.
*
- * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. This _will_ have nullptr,
+ * uintptr_t PushHandle(mirror::Object* ref): Add a reference to the HandleScope. This _will_ have nullptr,
* as this might be important for null initialization.
* Must return the jobject, that is, the reference to the
- * entry in the Sirt (nullptr if necessary).
+ * entry in the HandleScope (nullptr if necessary).
*
*/
template <class T> class BuildGenericJniFrameStateMachine {
@@ -956,18 +959,18 @@ template <class T> class BuildGenericJniFrameStateMachine {
}
- bool HaveSirtGpr() {
+ bool HaveHandleScopeGpr() {
return gpr_index_ > 0;
}
- void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t sirtRef = PushSirt(ptr);
- if (HaveSirtGpr()) {
+ void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uintptr_t handle = PushHandle(ptr);
+ if (HaveHandleScopeGpr()) {
gpr_index_--;
- PushGpr(sirtRef);
+ PushGpr(handle);
} else {
stack_entries_++;
- PushStack(sirtRef);
+ PushStack(handle);
gpr_index_ = 0;
}
}
@@ -1147,8 +1150,8 @@ template <class T> class BuildGenericJniFrameStateMachine {
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return delegate_->PushSirt(ref);
+ uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return delegate_->PushHandle(ref);
}
uint32_t gpr_index_; // Number of free GPRs
@@ -1160,7 +1163,7 @@ template <class T> class BuildGenericJniFrameStateMachine {
class ComputeGenericJniFrameSize FINAL {
public:
- ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}
+ ComputeGenericJniFrameSize() : num_handle_scope_references_(0), num_stack_entries_(0) {}
uint32_t GetStackSize() {
return num_stack_entries_ * sizeof(uintptr_t);
@@ -1168,7 +1171,7 @@ class ComputeGenericJniFrameSize FINAL {
// WARNING: After this, *sp won't be pointing to the method anymore!
void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
- void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
+ void* sp, HandleScope** table, uint32_t* handle_scope_entries,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1179,17 +1182,17 @@ class ComputeGenericJniFrameSize FINAL {
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
// First, fix up the layout of the callee-save frame.
- // We have to squeeze in the Sirt, and relocate the method pointer.
+ // We have to squeeze in the HandleScope, and relocate the method pointer.
// "Free" the slot for the method.
sp8 += kPointerSize;
- // Add the Sirt.
- *sirt_entries = num_sirt_references_;
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
- sp8 -= sirt_size;
- *table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
- (*table)->SetNumberOfReferences(num_sirt_references_);
+ // Add the HandleScope.
+ *handle_scope_entries = num_handle_scope_references_;
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSize(num_handle_scope_references_);
+ sp8 -= handle_scope_size;
+ *table = reinterpret_cast<HandleScope*>(sp8);
+ (*table)->SetNumberOfReferences(num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
sp8 -= kPointerSize;
@@ -1199,8 +1202,8 @@ class ComputeGenericJniFrameSize FINAL {
// Reference cookie and padding
sp8 -= 8;
- // Store Sirt size
- *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);
+ // Store HandleScope size
+ *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(handle_scope_size & 0xFFFFFFFF);
// Next comes the native call stack.
sp8 -= GetStackSize();
@@ -1229,7 +1232,7 @@ class ComputeGenericJniFrameSize FINAL {
*(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
}
- void ComputeSirtOffset() { } // nothing to do, static right now
+ void ComputeHandleScopeOffset() { } // nothing to do, static right now
void ComputeAll(bool is_static, const char* shorty, uint32_t shorty_len)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1239,13 +1242,13 @@ class ComputeGenericJniFrameSize FINAL {
sm.AdvancePointer(nullptr);
// Class object or this as first argument
- sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
for (uint32_t i = 1; i < shorty_len; ++i) {
Primitive::Type cur_type_ = Primitive::GetType(shorty[i]);
switch (cur_type_) {
case Primitive::kPrimNot:
- sm.AdvanceSirt(reinterpret_cast<mirror::Object*>(0x12345678));
+ sm.AdvanceHandleScope(reinterpret_cast<mirror::Object*>(0x12345678));
break;
case Primitive::kPrimBoolean:
@@ -1288,13 +1291,13 @@ class ComputeGenericJniFrameSize FINAL {
// counting is already done in the superclass
}
- uintptr_t PushSirt(mirror::Object* /* ptr */) {
- num_sirt_references_++;
+ uintptr_t PushHandle(mirror::Object* /* ptr */) {
+ num_handle_scope_references_++;
return reinterpret_cast<uintptr_t>(nullptr);
}
private:
- uint32_t num_sirt_references_;
+ uint32_t num_handle_scope_references_;
uint32_t num_stack_entries_;
};
@@ -1306,26 +1309,26 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
uint32_t shorty_len, Thread* self) :
QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
- fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
+ fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &handle_scope_, &handle_scope_expected_refs_,
&cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
&alloca_used_size_);
- sirt_number_of_references_ = 0;
- cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
+ handle_scope_number_of_references_ = 0;
+ cur_hs_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstHandleScopeEntry());
// jni environment is always first argument
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceSirt((**sp)->GetDeclaringClass());
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
}
}
void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeSirt(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
+ jobject GetFirstHandleScopeEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return handle_scope_->GetHandle(0).ToJObject();
}
void PushGpr(uintptr_t val) {
@@ -1349,17 +1352,17 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
cur_stack_arg_++;
}
- uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uintptr_t tmp;
if (ref == nullptr) {
- *cur_sirt_entry_ = StackReference<mirror::Object>();
+ *cur_hs_entry_ = StackReference<mirror::Object>();
tmp = reinterpret_cast<uintptr_t>(nullptr);
} else {
- *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
- tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
+ *cur_hs_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
+ tmp = reinterpret_cast<uintptr_t>(cur_hs_entry_);
}
- cur_sirt_entry_++;
- sirt_number_of_references_++;
+ cur_hs_entry_++;
+ handle_scope_number_of_references_++;
return tmp;
}
@@ -1373,14 +1376,14 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
}
private:
- uint32_t sirt_number_of_references_;
- StackReference<mirror::Object>* cur_sirt_entry_;
- StackIndirectReferenceTable* sirt_;
- uint32_t sirt_expected_refs_;
+ uint32_t handle_scope_number_of_references_;
+ StackReference<mirror::Object>* cur_hs_entry_;
+ HandleScope* handle_scope_;
+ uint32_t handle_scope_expected_refs_;
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
uintptr_t* cur_stack_arg_;
- // StackReference<mirror::Object>* top_of_sirt_;
+ // StackReference<mirror::Object>* top_of_handle_scope_;
void* code_return_;
size_t alloca_used_size_;
@@ -1416,7 +1419,7 @@ void BuildGenericJniFrameVisitor::Visit() {
case Primitive::kPrimNot: {
StackReference<mirror::Object>* stack_ref =
reinterpret_cast<StackReference<mirror::Object>*>(GetParamAddress());
- sm_.AdvanceSirt(stack_ref->AsMirrorPtr());
+ sm_.AdvanceHandleScope(stack_ref->AsMirrorPtr());
break;
}
case Primitive::kPrimFloat:
@@ -1435,17 +1438,17 @@ void BuildGenericJniFrameVisitor::Visit() {
}
}
-void BuildGenericJniFrameVisitor::FinalizeSirt(Thread* self) {
+void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
// Initialize padding entries.
- while (sirt_number_of_references_ < sirt_expected_refs_) {
- *cur_sirt_entry_ = StackReference<mirror::Object>();
- cur_sirt_entry_++;
- sirt_number_of_references_++;
- }
- sirt_->SetNumberOfReferences(sirt_expected_refs_);
- DCHECK_NE(sirt_expected_refs_, 0U);
- // Install Sirt.
- self->PushSirt(sirt_);
+ while (handle_scope_number_of_references_ < handle_scope_expected_refs_) {
+ *cur_hs_entry_ = StackReference<mirror::Object>();
+ cur_hs_entry_++;
+ handle_scope_number_of_references_++;
+ }
+ handle_scope_->SetNumberOfReferences(handle_scope_expected_refs_);
+ DCHECK_NE(handle_scope_expected_refs_, 0U);
+ // Install HandleScope.
+ self->PushHandleScope(handle_scope_);
}
extern "C" void* artFindNativeMethod();
@@ -1468,11 +1471,11 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock)
/*
* Initializes an alloca region assumed to be directly below sp for a native call:
- * Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
+ * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
* The final element on the stack is a pointer to the native code.
*
* On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
- * We need to fix this, as the Sirt needs to go into the callee-save frame.
+ * We need to fix this, as the handle scope needs to go into the callee-save frame.
*
* The return of this function denotes:
* 1) How many bytes of the alloca can be released, if the value is non-negative.
@@ -1489,7 +1492,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
self);
visitor.VisitArguments();
- visitor.FinalizeSirt(self);
+ visitor.FinalizeHandleScope(self);
// fix up managed-stack things in Thread
self->SetTopOfStack(sp, 0);
@@ -1499,9 +1502,9 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
// Start JNI, save the cookie.
uint32_t cookie;
if (called->IsSynchronized()) {
- cookie = JniMethodStartSynchronized(visitor.GetFirstSirtEntry(), self);
+ cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeEntry(), self);
if (self->IsExceptionPending()) {
- self->PopSirt();
+ self->PopHandleScope();
// A negative value denotes an error.
return -1;
}
@@ -1527,7 +1530,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
// End JNI, as the assembly will move to deliver the exception.
- jobject lock = called->IsSynchronized() ? visitor.GetFirstSirtEntry() : nullptr;
+ jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeEntry() : nullptr;
if (mh.GetShorty()[0] == 'L') {
artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
} else {
@@ -1549,7 +1552,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
}
/*
- * Is called after the native JNI code. Responsible for cleanup (SIRT, saved state) and
+ * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
* unlocking.
*/
extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMethod** sp,
@@ -1561,10 +1564,9 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
jobject lock = nullptr;
if (called->IsSynchronized()) {
- StackIndirectReferenceTable* table =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<uint8_t*>(sp) + kPointerSize);
- lock = reinterpret_cast<jobject>(table->GetStackReference(0));
+ HandleScope* table = reinterpret_cast<HandleScope*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ lock = table->GetHandle(0).ToJObject();
}
MethodHelper mh(called);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index feb2331..91a0176 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -25,7 +25,7 @@
#include "mirror/stack_trace_element.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "UniquePtr.h"
#include "vmap_table.h"
@@ -38,13 +38,14 @@ class ExceptionTest : public CommonRuntimeTest {
CommonRuntimeTest::SetUp();
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle")));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle"))));
my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
ASSERT_TRUE(my_klass_ != NULL);
- SirtRef<mirror::Class> sirt_klass(soa.Self(), my_klass_);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- my_klass_ = sirt_klass.get();
+ Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
+ class_linker_->EnsureInitialized(klass, true, true);
+ my_klass_ = klass.Get();
dex_ = my_klass_->GetDexCache()->GetDexFile();
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index ea2f7c8..97d3c2f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -112,7 +112,7 @@ class JavaStackTraceHandler FINAL : public FaultHandler {
};
-// Statically allocated so the the signal handler can get access to it.
+// Statically allocated so the signal handler can get access to it.
extern FaultManager fault_manager;
} // namespace art
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index d05f45b..02dd4d9 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -142,7 +142,7 @@ class GarbageCollector {
virtual void RevokeAllThreadLocalBuffers() = 0;
// Record that you have freed some objects or large objects, calls Heap::RecordFree.
- // TODO: These are not thread safe, add a lock if we get have parallel sweeping.
+ // TODO: These are not thread safe, add a lock if we get parallel sweeping.
void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5de7026..cc258f5 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -106,7 +106,7 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
void MarkSweep::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
class_count_ = 0;
@@ -123,7 +123,7 @@ void MarkSweep::InitializePhase() {
mark_fastpath_count_ = 0;
mark_slowpath_count_ = 0;
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3ebc0af..cfb0b5e 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -264,7 +264,7 @@ class MarkSweep : public GarbageCollector {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
+ // Used to get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
void RevokeAllThreadLocalAllocationStacks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 55140f6..47682cc 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -50,7 +50,7 @@ inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object
return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
}
-// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
+// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
template<bool kPoisonReferences>
@@ -72,7 +72,7 @@ inline void SemiSpace::MarkObject(
forward_address = MarkNonForwardedObject(obj);
DCHECK(forward_address != nullptr);
// Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't get stomped over.
+ // monitor word doesn't get stomped over.
obj->SetLockWord(
LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
// Push the object onto the mark stack for later processing.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a406f6d..95a2c96 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -142,7 +142,7 @@ void SemiSpace::RunPhases() {
void SemiSpace::InitializePhase() {
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
- mark_stack_ = heap_->mark_stack_.get();
+ mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
is_large_object_space_immune_ = false;
@@ -154,7 +154,7 @@ void SemiSpace::InitializePhase() {
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
{
- // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
+ // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
@@ -172,7 +172,7 @@ void SemiSpace::MarkingPhase() {
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
if (kStoreStackTraces) {
Locks::mutator_lock_->AssertExclusiveHeld(self_);
- // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // Store the stack traces into the runtime fault string in case we get a heap corruption
// related crash later.
ThreadState old_state = self_->SetStateUnsafe(kRunnable);
std::ostringstream oss;
@@ -231,7 +231,7 @@ void SemiSpace::MarkingPhase() {
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
- // Clear the whole card table since we can not get any additional dirty cards during the
+ // Clear the whole card table since we cannot get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
timings_.NewSplit("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 9fdf471..4b1ecc4 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -242,7 +242,7 @@ class SemiSpace : public GarbageCollector {
// heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // How many objects and bytes we moved, used so that we don't need to get the size of the
+ // How many objects and bytes we moved, used so that we don't need to get the size of the
// to_space_ when calculating how many objects and bytes we freed.
size_t bytes_moved_;
size_t objects_moved_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index ce51ac5..5a58446 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -49,7 +49,7 @@ void StickyMarkSweep::BindBitmaps() {
void StickyMarkSweep::MarkReachableObjects() {
// All reachable objects must be referenced by a root or a dirty card, so we can clear the mark
- // stack here since all objects in the mark stack will get scanned by the card scanning anyways.
+ // stack here since all objects in the mark stack will get scanned by the card scanning anyway.
// TODO: Not put these objects in the mark stack in the first place.
mark_stack_->Reset();
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index a06f272..7cee5a0 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -27,7 +27,7 @@
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -144,10 +144,10 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
mirror::Object** end_address;
while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
&start_address, &end_address)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
self->SetThreadLocalAllocationStack(start_address, end_address);
// Retry on the new thread-local allocation stack.
@@ -159,10 +159,10 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
// This is safe to do since the GC will never free objects which are neither in the allocation
// stack or the live bitmap.
while (!allocation_stack_->AtomicPushBack(*obj)) {
- // Disable verify object in SirtRef as obj isn't on the alloc stack yet.
- SirtRefNoVerify<mirror::Object> ref(self, *obj);
+ // TODO: Add handle VerifyObject.
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
- *obj = ref.get();
}
}
}
@@ -300,11 +300,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
- SirtRef<mirror::Object> ref(self, *obj);
- RequestConcurrentGC(self);
- // Restore obj in case it moved.
- *obj = ref.get();
+ RequestConcurrentGCAndSaveObject(self, obj);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 7235729..4642a98 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -62,7 +62,7 @@
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread_list.h"
#include "UniquePtr.h"
#include "well_known_classes.h"
@@ -1070,10 +1070,11 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
mirror::Class** klass) {
- mirror::Object* ptr = nullptr;
bool was_default_allocator = allocator == GetCurrentAllocator();
DCHECK(klass != nullptr);
- SirtRef<mirror::Class> sirt_klass(self, *klass);
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Class> h(hs.NewHandleWrapper(klass));
+ klass = nullptr; // Invalidate for safety.
// The allocation failed. If the GC is running, block until it completes, and then retry the
// allocation.
collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
@@ -1081,31 +1082,32 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
// If we were the default allocator but the allocator changed while we were suspended,
// abort the allocation.
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
collector::GcType tried_type = next_gc_type_;
- if (ptr == nullptr) {
- const bool gc_ran =
- CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- if (gc_ran) {
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ const bool gc_ran =
+ CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
+ }
+ if (gc_ran) {
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
}
}
// Loop through our different Gc types and try to Gc until we get enough free memory.
for (collector::GcType gc_type : gc_plan_) {
- if (ptr != nullptr) {
- break;
- }
if (gc_type == tried_type) {
continue;
}
@@ -1113,40 +1115,41 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
const bool gc_ran =
CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
return nullptr;
}
if (gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
- ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
}
}
// Allocations have failed after GCs; this is an exceptional state.
- if (ptr == nullptr) {
- // Try harder, growing the heap if necessary.
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ // Try harder, growing the heap if necessary.
+ mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
+ usable_size);
+ if (ptr != nullptr) {
+ return ptr;
+ }
+ // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
+ // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
+ // VM spec requires that all SoftReferences have been collected and cleared before throwing
+ // OOME.
+ VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
+ << " allocation";
+ // TODO: Run finalization, but this may cause more allocations to occur.
+ // We don't need a WaitForGcToComplete here either.
+ DCHECK(!gc_plan_.empty());
+ CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+ if (was_default_allocator && allocator != GetCurrentAllocator()) {
+ return nullptr;
}
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
if (ptr == nullptr) {
- // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
- // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
- // VM spec requires that all SoftReferences have been collected and cleared before throwing
- // OOME.
- VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
- << " allocation";
- // TODO: Run finalization, but this may cause more allocations to occur.
- // We don't need a WaitForGcToComplete here either.
- DCHECK(!gc_plan_.empty());
- CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
- if (was_default_allocator && allocator != GetCurrentAllocator()) {
- *klass = sirt_klass.get();
- return nullptr;
- }
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
- if (ptr == nullptr) {
- ThrowOutOfMemoryError(self, alloc_size, false);
- }
+ ThrowOutOfMemoryError(self, alloc_size, false);
}
- *klass = sirt_klass.get();
return ptr;
}
@@ -2536,6 +2539,12 @@ void Heap::AddFinalizerReference(Thread* self, mirror::Object** object) {
*object = soa.Decode<mirror::Object*>(arg.get());
}
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+ StackHandleScope<1> hs(self);
+ HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
+ RequestConcurrentGC(self);
+}
+
void Heap::RequestConcurrentGC(Thread* self) {
// Make sure that we can do a concurrent GC.
Runtime* runtime = Runtime::Current();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index f71de1a..3b071d1 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -412,7 +412,7 @@ class Heap {
return GetTotalMemory() - num_bytes_allocated_;
}
- // Get the space that corresponds to an object's address. Current implementation searches all
+ // Get the space that corresponds to an object's address. Current implementation searches all
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
@@ -582,6 +582,10 @@ class Heap {
mirror::Object** obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ accounting::ObjectStack* GetMarkStack() {
+ return mark_stack_.get();
+ }
+
// We don't force this to be inlined since it is a slow path.
template <bool kInstrumented, typename PreFenceVisitor>
mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
@@ -634,7 +638,10 @@ class Heap {
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(heap_trim_request_lock_);
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
- void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RequestConcurrentGC(Thread* self)
+ LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
bool IsGCRequestPending() const;
// Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index a85ad4d..8850b92 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -20,7 +20,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
namespace gc {
@@ -43,14 +43,16 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
ScopedObjectAccess soa(Thread::Current());
// garbage is created during ClassLinker::Init
- SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
- SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
- // SIRT operator -> deferences the SIRT before running the method.
+ // handle scope operator -> dereferences the handle scope before running the method.
array->Set<false>(j, string);
}
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 7493c19..ba46dcc 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -24,7 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 3335e72..ce101e4 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -48,7 +48,8 @@ class SpaceTest : public CommonRuntimeTest {
}
mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::ClassLoader> null_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
mirror::Class* byte_array_class =
Runtime::Current()->GetClassLinker()->FindClass(self, "[B", null_loader);
@@ -62,10 +63,11 @@ class SpaceTest : public CommonRuntimeTest {
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -73,10 +75,11 @@ class SpaceTest : public CommonRuntimeTest {
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
size_t* bytes_allocated, size_t* usable_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> byte_array_class(self, GetByteArrayClass(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
if (obj != nullptr) {
- InstallClass(obj, byte_array_class.get(), bytes);
+ InstallClass(obj, byte_array_class.Get(), bytes);
}
return obj;
}
@@ -177,9 +180,10 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -190,9 +194,9 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -206,23 +210,23 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- EXPECT_EQ(free3, space->Free(self, ptr3.reset(nullptr)));
+ EXPECT_EQ(free3, space->Free(self, ptr3.Assign(nullptr)));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
@@ -243,8 +247,8 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
AddSpace(space, false);
// Succeeds, fits without adjusting the footprint limit.
- ptr1.reset(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -254,16 +258,16 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3.reset(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(2U * MB, ptr3_bytes_allocated);
EXPECT_LE(2U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
// Final clean up.
- free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -279,9 +283,10 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
// Succeeds, fits without adjusting the footprint limit.
size_t ptr1_bytes_allocated, ptr1_usable_size;
- SirtRef<mirror::Object> ptr1(self, Alloc(space, self, 1 * MB, &ptr1_bytes_allocated,
- &ptr1_usable_size));
- EXPECT_TRUE(ptr1.get() != nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Object> ptr1(
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
@@ -292,9 +297,9 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
// Succeeds, adjusts the footprint.
size_t ptr3_bytes_allocated, ptr3_usable_size;
- SirtRef<mirror::Object> ptr3(self, AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated,
- &ptr3_usable_size));
- EXPECT_TRUE(ptr3.get() != nullptr);
+ Handle<mirror::Object> ptr3(
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
@@ -308,23 +313,23 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
- size_t free3 = space->AllocationSize(ptr3.get(), nullptr);
+ size_t free3 = space->AllocationSize(ptr3.Get(), nullptr);
EXPECT_EQ(free3, ptr3_bytes_allocated);
- space->Free(self, ptr3.reset(nullptr));
+ space->Free(self, ptr3.Assign(nullptr));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
size_t ptr6_bytes_allocated, ptr6_usable_size;
- SirtRef<mirror::Object> ptr6(self, AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated,
- &ptr6_usable_size));
- EXPECT_TRUE(ptr6.get() != nullptr);
+ Handle<mirror::Object> ptr6(
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
// Final clean up.
- size_t free1 = space->AllocationSize(ptr1.get(), nullptr);
- space->Free(self, ptr1.reset(nullptr));
+ size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
+ space->Free(self, ptr1.Assign(nullptr));
EXPECT_LE(1U * MB, free1);
}
@@ -345,8 +350,6 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
&usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -360,8 +363,6 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
size_t allocation_size, usable_size;
lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
- SirtRef<mirror::Object> obj(self, lots_of_objects[i]);
- lots_of_objects[i] = obj.get();
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
@@ -418,18 +419,19 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
alloc_size = size_of_zero_length_byte_array;
}
}
- SirtRef<mirror::Object> object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto object(hs.NewHandle<mirror::Object>(nullptr));
size_t bytes_allocated = 0;
if (round <= 1) {
- object.reset(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
} else {
- object.reset(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
- if (object.get() != nullptr) { // allocation succeeded
- lots_of_objects[i] = object.get();
- size_t allocation_size = space->AllocationSize(object.get(), nullptr);
+ if (object.Get() != nullptr) { // allocation succeeded
+ lots_of_objects[i] = object.Get();
+ size_t allocation_size = space->AllocationSize(object.Get(), nullptr);
EXPECT_EQ(bytes_allocated, allocation_size);
if (object_size > 0) {
EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
@@ -509,16 +511,17 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
space->RevokeAllThreadLocalBuffers();
// All memory was released, try a large allocation to check freed memory is being coalesced
- SirtRef<mirror::Object> large_object(self, nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto large_object(hs.NewHandle<mirror::Object>(nullptr));
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
if (round <= 1) {
- large_object.reset(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+ large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
} else {
- large_object.reset(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
- nullptr));
+ large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
+ nullptr));
}
- EXPECT_TRUE(large_object.get() != nullptr);
+ EXPECT_TRUE(large_object.Get() != nullptr);
// Sanity check footprint
footprint = space->GetFootprint();
@@ -527,7 +530,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
EXPECT_LE(space->Size(), growth_limit);
// Clean up
- space->Free(self, large_object.reset(nullptr));
+ space->Free(self, large_object.Assign(nullptr));
// Sanity check footprint
footprint = space->GetFootprint();
diff --git a/runtime/handle.h b/runtime/handle.h
new file mode 100644
index 0000000..3127864
--- /dev/null
+++ b/runtime/handle.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_H_
+#define ART_RUNTIME_HANDLE_H_
+
+#include "base/casts.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "stack.h"
+
+namespace art {
+
+class Thread;
+
+template<class T>
+class Handle {
+ public:
+ Handle() : reference_(nullptr) {
+ }
+ Handle(const Handle<T>& handle) ALWAYS_INLINE : reference_(handle.reference_) {
+ }
+ Handle<T>& operator=(const Handle<T>& handle) ALWAYS_INLINE {
+ reference_ = handle.reference_;
+ return *this;
+ }
+ explicit Handle(StackReference<T>* reference) ALWAYS_INLINE : reference_(reference) {
+ }
+ T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return *Get();
+ }
+ T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return Get();
+ }
+ T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ return reference_->AsMirrorPtr();
+ }
+ T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ T* old = reference_->AsMirrorPtr();
+ reference_->Assign(reference);
+ return old;
+ }
+ jobject ToJObject() const ALWAYS_INLINE {
+ return reinterpret_cast<jobject>(reference_);
+ }
+
+ private:
+ StackReference<T>* reference_;
+
+ template<typename S>
+ explicit Handle(StackReference<S>* reference)
+ : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
+ }
+
+ template<typename S>
+ explicit Handle(const Handle<S>& handle)
+ : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
+ }
+
+ template<class S> friend class Handle;
+ friend class HandleScope;
+ template<class S> friend class HandleWrapper;
+ template<size_t kNumReferences> friend class StackHandleScope;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_H_
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
new file mode 100644
index 0000000..b9b51fd
--- /dev/null
+++ b/runtime/handle_scope-inl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_SCOPE_INL_H_
+#define ART_RUNTIME_HANDLE_SCOPE_INL_H_
+
+#include "handle_scope.h"
+
+#include "handle.h"
+#include "thread.h"
+
+namespace art {
+
+template<size_t kNumReferences>
+StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
+ : HandleScope(kNumReferences), self_(self), pos_(0) {
+ // TODO: Figure out how to use a compile assert.
+ DCHECK_EQ(OFFSETOF_MEMBER(HandleScope, references_),
+ OFFSETOF_MEMBER(StackHandleScope<1>, references_storage_));
+ for (size_t i = 0; i < kNumReferences; ++i) {
+ SetReference(i, nullptr);
+ }
+ self_->PushHandleScope(this);
+}
+
+template<size_t kNumReferences>
+StackHandleScope<kNumReferences>::~StackHandleScope() {
+ HandleScope* top_handle_scope = self_->PopHandleScope();
+ DCHECK_EQ(top_handle_scope, this);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_SCOPE_INL_H_
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
new file mode 100644
index 0000000..27c1bdc
--- /dev/null
+++ b/runtime/handle_scope.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HANDLE_SCOPE_H_
+#define ART_RUNTIME_HANDLE_SCOPE_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "handle.h"
+#include "stack.h"
+#include "utils.h"
+
+namespace art {
+namespace mirror {
+class Object;
+}
+class Thread;
+
+// HandleScopes can be allocated within the bridge frame between managed and native code backed by
+// stack storage or manually allocated in native.
+class HandleScope {
+ public:
+ ~HandleScope() {}
+
+ // Number of references contained within this handle scope.
+ uint32_t NumberOfReferences() const {
+ return number_of_references_;
+ }
+
+ // We have versions with and without explicit pointer size of the following. The first two are
+ // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
+ // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
+
+ // Returns the size of a HandleScope containing num_references handles.
+ static size_t SizeOf(uint32_t num_references) {
+ size_t header_size = OFFSETOF_MEMBER(HandleScope, references_);
+ size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+ return header_size + data_size;
+ }
+
+ // Get the size of the handle scope for the number of entries, with padding added for potential alignment.
+ static size_t GetAlignedHandleScopeSize(uint32_t num_references) {
+ size_t handle_scope_size = SizeOf(num_references);
+ return RoundUp(handle_scope_size, 8);
+ }
+
+ // Get the aligned size of a handle scope with num_references entries when cross-compiling for a target with the given pointer size.
+ static size_t GetAlignedHandleScopeSizeTarget(size_t pointer_size, uint32_t num_references) {
+ // Assume that the layout is packed.
+ size_t header_size = pointer_size + sizeof(number_of_references_);
+ // This assumes there is no layout change between 32 and 64b.
+ size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
+ size_t handle_scope_size = header_size + data_size;
+ return RoundUp(handle_scope_size, 8);
+ }
+
+ // Link to previous HandleScope or null.
+ HandleScope* GetLink() const {
+ return link_;
+ }
+
+ void SetLink(HandleScope* link) {
+ DCHECK_NE(this, link);
+ link_ = link;
+ }
+
+ // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
+ // not resize anything.
+ void SetNumberOfReferences(uint32_t num_references) {
+ number_of_references_ = num_references;
+ }
+
+ mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ return references_[i].AsMirrorPtr();
+ }
+
+ Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ return Handle<mirror::Object>(&references_[i]);
+ }
+
+ void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ DCHECK_LT(i, number_of_references_);
+ references_[i].Assign(object);
+ }
+
+ bool Contains(StackReference<mirror::Object>* handle_scope_entry) const {
+ // A HandleScope should always contain something. One created by the
+ // jni_compiler should have a jobject/jclass as a native method is
+ // passed in a this pointer or a class
+ DCHECK_GT(number_of_references_, 0U);
+ return ((&references_[0] <= handle_scope_entry)
+ && (handle_scope_entry <= (&references_[number_of_references_ - 1])));
+ }
+
+ // Offset of link within HandleScope, used by generated code
+ static size_t LinkOffset(size_t pointer_size) {
+ return 0;
+ }
+
+ // Offset of length within handle scope, used by generated code
+ static size_t NumberOfReferencesOffset(size_t pointer_size) {
+ return pointer_size;
+ }
+
+ // Offset of the references array within handle scope, used by generated code
+ static size_t ReferencesOffset(size_t pointer_size) {
+ return pointer_size + sizeof(number_of_references_);
+ }
+
+ protected:
+ explicit HandleScope(size_t number_of_references) :
+ link_(nullptr), number_of_references_(number_of_references) {
+ }
+
+ HandleScope* link_;
+ uint32_t number_of_references_;
+
+ // Storage for references: number_of_references_ entries are available if this is allocated and filled in by the jni_compiler.
+ StackReference<mirror::Object> references_[0];
+
+ private:
+ template<size_t kNumReferences> friend class StackHandleScope;
+ DISALLOW_COPY_AND_ASSIGN(HandleScope);
+};
+
+// A wrapper which wraps around Object** and restores the pointer in the destructor.
+// TODO: Add more functionality.
+template<class T>
+class HandleWrapper {
+ public:
+ HandleWrapper(T** obj, const Handle<T>& handle)
+ : obj_(obj), handle_(handle) {
+ }
+
+ ~HandleWrapper() {
+ *obj_ = handle_.Get();
+ }
+
+ private:
+ T** obj_;
+ Handle<T> handle_;
+};
+
+// Scoped handle storage of a fixed size that is usually stack allocated.
+template<size_t kNumReferences>
+class StackHandleScope : public HandleScope {
+ public:
+ explicit StackHandleScope(Thread* self);
+ ~StackHandleScope();
+
+ template<class T>
+ Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReference(pos_, object);
+ return Handle<T>(GetHandle(pos_++));
+ }
+
+ template<class T>
+ HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetReference(pos_, *object);
+ Handle<T> h(GetHandle(pos_++));
+ return HandleWrapper<T>(object, h);
+ }
+
+ private:
+ // references_storage_ needs to be first so that it matches the address of references_.
+ StackReference<mirror::Object> references_storage_[kNumReferences];
+ Thread* const self_;
+ size_t pos_;
+
+ template<size_t kNumRefs> friend class StackHandleScope;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_HANDLE_SCOPE_H_
diff --git a/runtime/stack_indirect_reference_table_test.cc b/runtime/handle_scope_test.cc
index 72ef6b6..de563c1 100644
--- a/runtime/stack_indirect_reference_table_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -14,33 +14,49 @@
* limitations under the License.
*/
-#include "stack_indirect_reference_table.h"
#include "gtest/gtest.h"
+#include "handle_scope-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
namespace art {
-// Test the offsets computed for members of StackIndirectReferenceTable. Because of cross-compiling
+// Handle scope with a fixed size which is allocated on the stack.
+template<size_t kNumReferences>
+class NoThreadStackHandleScope : public HandleScope {
+ public:
+ explicit NoThreadStackHandleScope() : HandleScope(kNumReferences) {
+ }
+ ~NoThreadStackHandleScope() {
+ }
+
+ private:
+ // references_storage_ needs to be first so that it matches the address of references_
+ StackReference<mirror::Object> references_storage_[kNumReferences];
+};
+
+// Test the offsets computed for members of HandleScope. Because of cross-compiling
// it is impossible the use OFFSETOF_MEMBER, so we do some reasonable computations ourselves. This
// test checks whether we do the right thing.
-TEST(StackIndirectReferenceTableTest, Offsets) {
- // As the members of StackIndirectReferenceTable are private, we cannot use OFFSETOF_MEMBER
+TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
+ // As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
// here. So do the inverse: set some data, and access it through pointers created from the offsets.
-
- StackIndirectReferenceTable test_table(reinterpret_cast<mirror::Object*>(0x1234));
- test_table.SetLink(reinterpret_cast<StackIndirectReferenceTable*>(0x5678));
+ NoThreadStackHandleScope<1> test_table;
+ test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
+ test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
test_table.SetNumberOfReferences(0x9ABC);
byte* table_base_ptr = reinterpret_cast<byte*>(&test_table);
{
uintptr_t* link_ptr = reinterpret_cast<uintptr_t*>(table_base_ptr +
- StackIndirectReferenceTable::LinkOffset(kPointerSize));
+ HandleScope::LinkOffset(kPointerSize));
EXPECT_EQ(*link_ptr, static_cast<size_t>(0x5678));
}
{
uint32_t* num_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- StackIndirectReferenceTable::NumberOfReferencesOffset(kPointerSize));
+ HandleScope::NumberOfReferencesOffset(kPointerSize));
EXPECT_EQ(*num_ptr, static_cast<size_t>(0x9ABC));
}
@@ -50,7 +66,7 @@ TEST(StackIndirectReferenceTableTest, Offsets) {
EXPECT_EQ(sizeof(StackReference<mirror::Object>), sizeof(uint32_t));
uint32_t* ref_ptr = reinterpret_cast<uint32_t*>(table_base_ptr +
- StackIndirectReferenceTable::ReferencesOffset(kPointerSize));
+ HandleScope::ReferencesOffset(kPointerSize));
EXPECT_EQ(*ref_ptr, static_cast<uint32_t>(0x1234));
}
}
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 1a28347..42a9757 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -33,7 +33,7 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
LOG(WARNING) << "Attempt to look up NULL " << kind_;
return false;
}
- if (UNLIKELY(GetIndirectRefKind(iref) == kSirtOrInvalid)) {
+ if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
LOG(ERROR) << "JNI ERROR (app bug): invalid " << kind_ << " " << iref;
AbortIfNoCheckJNI();
return false;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index b81e43a..432481b 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -66,7 +66,7 @@ IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
size_t maxCount, IndirectRefKind desiredKind) {
CHECK_GT(initialCount, 0U);
CHECK_LE(initialCount, maxCount);
- CHECK_NE(desiredKind, kSirtOrInvalid);
+ CHECK_NE(desiredKind, kHandleScopeOrInvalid);
std::string error_str;
const size_t initial_bytes = initialCount * sizeof(const mirror::Object*);
@@ -184,9 +184,9 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int idx = ExtractIndex(iref);
- if (GetIndirectRefKind(iref) == kSirtOrInvalid &&
- Thread::Current()->SirtContains(reinterpret_cast<jobject>(iref))) {
- LOG(WARNING) << "Attempt to remove local SIRT entry from IRT, ignoring";
+ if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
+ Thread::Current()->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
+ LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
return true;
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index f365acc..833b07a 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -111,7 +111,7 @@ static mirror::Object* const kClearedJniWeakGlobal = reinterpret_cast<mirror::Ob
* For convenience these match up with enum jobjectRefType from jni.h.
*/
enum IndirectRefKind {
- kSirtOrInvalid = 0, // <<stack indirect reference table or invalid reference>>
+ kHandleScopeOrInvalid = 0, // <<handle scope or invalid reference>>
kLocal = 1, // <<local reference>>
kGlobal = 2, // <<global reference>>
kWeakGlobal = 3 // <<weak global reference>>
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 8987127..5995d9e 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -18,7 +18,7 @@
#include "common_runtime_test.h"
#include "mirror/object.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -27,19 +27,21 @@ class InternTableTest : public CommonRuntimeTest {};
TEST_F(InternTableTest, Intern) {
ScopedObjectAccess soa(Thread::Current());
InternTable intern_table;
- SirtRef<mirror::String> foo_1(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<mirror::String> foo_2(soa.Self(), intern_table.InternStrong(3, "foo"));
- SirtRef<mirror::String> foo_3(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> bar(soa.Self(), intern_table.InternStrong(3, "bar"));
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::String> foo_1(hs.NewHandle(intern_table.InternStrong(3, "foo")));
+ Handle<mirror::String> foo_2(hs.NewHandle(intern_table.InternStrong(3, "foo")));
+ Handle<mirror::String> foo_3(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> bar(hs.NewHandle(intern_table.InternStrong(3, "bar")));
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
- EXPECT_TRUE(foo_1.get() != NULL);
- EXPECT_TRUE(foo_2.get() != NULL);
- EXPECT_EQ(foo_1.get(), foo_2.get());
- EXPECT_NE(foo_1.get(), bar.get());
- EXPECT_NE(foo_2.get(), bar.get());
- EXPECT_NE(foo_3.get(), bar.get());
+ EXPECT_TRUE(foo_1.Get() != NULL);
+ EXPECT_TRUE(foo_2.Get() != NULL);
+ EXPECT_EQ(foo_1.Get(), foo_2.Get());
+ EXPECT_NE(foo_1.Get(), bar.Get());
+ EXPECT_NE(foo_2.Get(), bar.Get());
+ EXPECT_NE(foo_3.Get(), bar.Get());
}
TEST_F(InternTableTest, Size) {
@@ -47,8 +49,10 @@ TEST_F(InternTableTest, Size) {
InternTable t;
EXPECT_EQ(0U, t.Size());
t.InternStrong(3, "foo");
- SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- t.InternWeak(foo.get());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ t.InternWeak(foo.Get());
EXPECT_EQ(1U, t.Size());
t.InternStrong(3, "bar");
EXPECT_EQ(2U, t.Size());
@@ -93,19 +97,20 @@ TEST_F(InternTableTest, SweepInternTableWeaks) {
InternTable t;
t.InternStrong(3, "foo");
t.InternStrong(3, "bar");
- SirtRef<mirror::String> hello(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello"));
- SirtRef<mirror::String> world(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "world"));
- SirtRef<mirror::String> s0(soa.Self(), t.InternWeak(hello.get()));
- SirtRef<mirror::String> s1(soa.Self(), t.InternWeak(world.get()));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::String> hello(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello")));
+ Handle<mirror::String> world(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "world")));
+ Handle<mirror::String> s0(hs.NewHandle(t.InternWeak(hello.Get())));
+ Handle<mirror::String> s1(hs.NewHandle(t.InternWeak(world.Get())));
EXPECT_EQ(4U, t.Size());
// We should traverse only the weaks...
TestPredicate p;
- p.Expect(s0.get());
- p.Expect(s1.get());
+ p.Expect(s0.Get());
+ p.Expect(s1.Get());
{
ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
t.SweepInternTableWeaks(IsMarkedSweepingCallback, &p);
@@ -114,9 +119,9 @@ TEST_F(InternTableTest, SweepInternTableWeaks) {
EXPECT_EQ(2U, t.Size());
// Just check that we didn't corrupt the map.
- SirtRef<mirror::String> still_here(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here"));
- t.InternWeak(still_here.get());
+ Handle<mirror::String> still_here(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "still here")));
+ t.InternWeak(still_here.Get());
EXPECT_EQ(3U, t.Size());
}
@@ -125,48 +130,53 @@ TEST_F(InternTableTest, ContainsWeak) {
{
// Strongs are never weak.
InternTable t;
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// Weaks are always weak.
InternTable t;
- SirtRef<mirror::String> foo_1(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> foo_2(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- EXPECT_NE(foo_1.get(), foo_2.get());
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo_2.get()));
- EXPECT_TRUE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::String> foo_1(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> foo_2(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ EXPECT_NE(foo_1.Get(), foo_2.Get());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternWeak(foo_1.Get())));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternWeak(foo_2.Get())));
+ EXPECT_TRUE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// A weak can be promoted to a strong.
InternTable t;
- SirtRef<mirror::String> foo(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternWeak(foo.get()));
- EXPECT_TRUE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternWeak(foo.Get())));
+ EXPECT_TRUE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
{
// Interning a weak after a strong gets you the strong.
InternTable t;
- SirtRef<mirror::String> interned_foo_1(soa.Self(), t.InternStrong(3, "foo"));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_1.get()));
- SirtRef<mirror::String> foo(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo"));
- SirtRef<mirror::String> interned_foo_2(soa.Self(), t.InternWeak(foo.get()));
- EXPECT_FALSE(t.ContainsWeak(interned_foo_2.get()));
- EXPECT_EQ(interned_foo_1.get(), interned_foo_2.get());
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::String> interned_foo_1(hs.NewHandle(t.InternStrong(3, "foo")));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_1.Get()));
+ Handle<mirror::String> foo(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "foo")));
+ Handle<mirror::String> interned_foo_2(hs.NewHandle(t.InternWeak(foo.Get())));
+ EXPECT_FALSE(t.ContainsWeak(interned_foo_2.Get()));
+ EXPECT_EQ(interned_foo_1.Get(), interned_foo_2.Get());
}
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index a87f95c..20e2b8d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -71,10 +71,10 @@ static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
} else if (name == "int java.lang.String.fastIndexOf(int, int)") {
result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
} else if (name == "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") {
- SirtRef<mirror::Class> sirt_class(self, reinterpret_cast<Object*>(args[0])->AsClass());
- SirtRef<mirror::IntArray> sirt_dimensions(self,
- reinterpret_cast<Object*>(args[1])->AsIntArray());
- result->SetL(Array::CreateMultiArray(self, sirt_class, sirt_dimensions));
+ StackHandleScope<2> hs(self);
+ auto h_class(hs.NewHandle(reinterpret_cast<mirror::Object*>(args[0])->AsClass()));
+ auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::Object*>(args[1])->AsIntArray()));
+ result->SetL(Array::CreateMultiArray(self, h_class, h_dimensions));
} else if (name == "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") {
ScopedObjectAccessUnchecked soa(self);
if (Runtime::Current()->IsActiveTransaction()) {
@@ -455,8 +455,9 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
// Do this after populating the shadow frame in case EnsureInitialized causes a GC.
if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitializing())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> sirt_c(self, method->GetDeclaringClass());
- if (UNLIKELY(!class_linker->EnsureInitialized(sirt_c, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
CHECK(self->IsExceptionPending());
self->PopShadowFrame();
return;
@@ -522,7 +523,8 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
- SirtRef<Class> declaringClass(self, method->GetDeclaringClass());
+ StackHandleScope<1> hs(self);
+ Handle<Class> declaringClass(hs.NewHandle(method->GetDeclaringClass()));
if (UNLIKELY(!declaringClass->IsInitializing())) {
if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(declaringClass, true,
true))) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ee6a869..c5fb0d8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -296,7 +296,9 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
// other variants that take more arguments should also be added.
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset)->AsString()->ToModifiedUtf8().c_str()));
- SirtRef<ClassLoader> class_loader(self, nullptr); // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ StackHandleScope<1> hs(self);
+ // shadow_frame.GetMethod()->GetDeclaringClass()->GetClassLoader();
+ auto class_loader = hs.NewHandle<ClassLoader>(nullptr);
Class* found = Runtime::Current()->GetClassLinker()->FindClass(self, descriptor.c_str(),
class_loader);
CHECK(found != NULL) << "Class.forName failed in un-started runtime for class: "
@@ -305,7 +307,9 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
} else if (name == "java.lang.Class java.lang.Void.lookupType()") {
result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
} else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
- SirtRef<ClassLoader> class_loader(self, down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset)));
+ StackHandleScope<1> hs(self);
+ Handle<ClassLoader> class_loader(
+ hs.NewHandle(down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset))));
std::string descriptor(DotToDescriptor(shadow_frame->GetVRegReference(arg_offset + 1)->AsString()->ToModifiedUtf8().c_str()));
Class* found = Runtime::Current()->GetClassLinker()->FindClass(self, descriptor.c_str(),
@@ -315,10 +319,11 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
ArtMethod* c = klass->FindDeclaredDirectMethod("<init>", "()V");
CHECK(c != NULL);
- SirtRef<Object> obj(self, klass->AllocObject(self));
- CHECK(obj.get() != NULL);
- EnterInterpreterFromInvoke(self, c, obj.get(), NULL, NULL);
- result->SetL(obj.get());
+ StackHandleScope<1> hs(self);
+ Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(obj.Get() != NULL);
+ EnterInterpreterFromInvoke(self, c, obj.Get(), NULL, NULL);
+ result->SetL(obj.Get());
} else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
// Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
// going the reflective Dex way.
@@ -350,13 +355,14 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
// TODO: getDeclaredField calls GetType once the field is found to ensure a
// NoClassDefFoundError is thrown if the field's type cannot be resolved.
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- SirtRef<Object> field(self, jlr_Field->AllocNonMovableObject(self));
- CHECK(field.get() != NULL);
+ StackHandleScope<1> hs(self);
+ Handle<Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
+ CHECK(field.Get() != NULL);
ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
uint32_t args[1];
args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
- EnterInterpreterFromInvoke(self, c, field.get(), args, NULL);
- result->SetL(field.get());
+ EnterInterpreterFromInvoke(self, c, field.Get(), args, NULL);
+ result->SetL(field.Get());
} else if (name == "int java.lang.Object.hashCode()") {
Object* obj = shadow_frame->GetVRegReference(arg_offset);
result->SetI(obj->IdentityHashCode());
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 819b79d..af8b534 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -410,8 +410,9 @@ static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t str
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> sirt_class(self, java_lang_string_class);
- if (UNLIKELY(!class_linker->EnsureInitialized(sirt_class, true, true))) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
+ if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
DCHECK(self->IsExceptionPending());
return nullptr;
}
@@ -571,7 +572,8 @@ static inline uint32_t FindNextInstructionFollowingException(Thread* self,
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
bool clear_exception = false;
- SirtRef<mirror::Class> exception_class(self, exception->GetClass());
+ StackHandleScope<3> hs(self);
+ Handle<mirror::Class> exception_class(hs.NewHandle(exception->GetClass()));
uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception_class, dex_pc,
&clear_exception);
if (found_dex_pc == DexFile::kDexNoIndex) {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 915f2c9..4634971 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -117,11 +117,12 @@ static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass)
if (LIKELY(klass->IsInitialized())) {
return klass;
}
- SirtRef<mirror::Class> sirt_klass(self, klass);
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
return nullptr;
}
- return sirt_klass.get();
+ return h_klass.Get();
}
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
@@ -180,16 +181,17 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
const char* sig, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SirtRef<mirror::Class> c(soa.Self(), EnsureInitialized(soa.Self(),
- soa.Decode<mirror::Class*>(jni_class)));
- if (c.get() == nullptr) {
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> c(
+ hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
+ if (c.Get() == nullptr) {
return nullptr;
}
mirror::ArtField* field = nullptr;
mirror::Class* field_type;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (sig[1] != '\0') {
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), c->GetClassLoader());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(c->GetClassLoader()));
field_type = class_linker->FindClass(soa.Self(), sig, class_loader);
} else {
field_type = class_linker->FindPrimitiveClass(*sig);
@@ -198,13 +200,14 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
ThrowLocation throw_location;
- SirtRef<mirror::Throwable> cause(soa.Self(), soa.Self()->GetException(&throw_location));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location)));
soa.Self()->ClearException();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no type \"%s\" found and so no field \"%s\" "
"could be found in class \"%s\" or its superclasses", sig, name,
- ClassHelper(c.get()).GetDescriptor());
- soa.Self()->GetException(nullptr)->SetCause(cause.get());
+ ClassHelper(c.Get()).GetDescriptor());
+ soa.Self()->GetException(nullptr)->SetCause(cause.Get());
return nullptr;
}
if (is_static) {
@@ -216,7 +219,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
- sig, name, ClassHelper(c.get()).GetDescriptor());
+ sig, name, ClassHelper(c.Get()).GetDescriptor());
return nullptr;
}
return soa.EncodeField(field);
@@ -546,7 +549,8 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Class* c = nullptr;
if (runtime->IsStarted()) {
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), GetClassLoader(soa));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetClassLoader(soa)));
c = class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader);
} else {
c = class_linker->FindSystemClass(soa.Self(), descriptor.c_str());
@@ -658,26 +662,28 @@ class JNI {
static void ExceptionDescribe(JNIEnv* env) {
ScopedObjectAccess soa(env);
- SirtRef<mirror::Object> old_throw_this_object(soa.Self(), nullptr);
- SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), nullptr);
- SirtRef<mirror::Throwable> old_exception(soa.Self(), nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ // TODO: Use nullptr instead of null handles?
+ auto old_throw_this_object(hs.NewHandle<mirror::Object>(nullptr));
+ auto old_throw_method(hs.NewHandle<mirror::ArtMethod>(nullptr));
+ auto old_exception(hs.NewHandle<mirror::Throwable>(nullptr));
uint32_t old_throw_dex_pc;
{
ThrowLocation old_throw_location;
mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
+ old_throw_this_object.Assign(old_throw_location.GetThis());
+ old_throw_method.Assign(old_throw_location.GetMethod());
+ old_exception.Assign(old_exception_obj);
old_throw_dex_pc = old_throw_location.GetDexPc();
soa.Self()->ClearException();
}
ScopedLocalRef<jthrowable> exception(env,
- soa.AddLocalReference<jthrowable>(old_exception.get()));
+ soa.AddLocalReference<jthrowable>(old_exception.Get()));
ScopedLocalRef<jclass> exception_class(env, env->GetObjectClass(exception.get()));
jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V");
if (mid == nullptr) {
LOG(WARNING) << "JNI WARNING: no printStackTrace()V in "
- << PrettyTypeOf(old_exception.get());
+ << PrettyTypeOf(old_exception.Get());
} else {
env->CallVoidMethod(exception.get(), mid);
if (soa.Self()->IsExceptionPending()) {
@@ -686,10 +692,10 @@ class JNI {
soa.Self()->ClearException();
}
}
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
+ soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
@@ -2443,9 +2449,9 @@ class JNI {
return JNIGlobalRefType;
case kWeakGlobal:
return JNIWeakGlobalRefType;
- case kSirtOrInvalid:
+ case kHandleScopeOrInvalid:
// Is it in a stack IRT?
- if (static_cast<JNIEnvExt*>(env)->self->SirtContains(java_object)) {
+ if (static_cast<JNIEnvExt*>(env)->self->HandleScopeContains(java_object)) {
return JNILocalRefType;
}
return JNIInvalidRefType;
@@ -3090,7 +3096,7 @@ void JavaVMExt::DumpReferenceTables(std::ostream& os) {
}
bool JavaVMExt::LoadNativeLibrary(const std::string& path,
- const SirtRef<mirror::ClassLoader>& class_loader,
+ const Handle<mirror::ClassLoader>& class_loader,
std::string* detail) {
detail->clear();
@@ -3106,18 +3112,18 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path,
library = libraries->Get(path);
}
if (library != nullptr) {
- if (library->GetClassLoader() != class_loader.get()) {
+ if (library->GetClassLoader() != class_loader.Get()) {
// The library will be associated with class_loader. The JNI
// spec says we can't load the same library into more than one
// class loader.
StringAppendF(detail, "Shared library \"%s\" already opened by "
"ClassLoader %p; can't open in ClassLoader %p",
- path.c_str(), library->GetClassLoader(), class_loader.get());
+ path.c_str(), library->GetClassLoader(), class_loader.Get());
LOG(WARNING) << detail;
return false;
}
VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
- << "ClassLoader " << class_loader.get() << "]";
+ << "ClassLoader " << class_loader.Get() << "]";
if (!library->CheckOnLoadResult()) {
StringAppendF(detail, "JNI_OnLoad failed on a previous attempt "
"to load \"%s\"", path.c_str());
@@ -3158,18 +3164,18 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path,
MutexLock mu(self, libraries_lock);
library = libraries->Get(path);
if (library == nullptr) { // We won race to get libraries_lock
- library = new SharedLibrary(path, handle, class_loader.get());
+ library = new SharedLibrary(path, handle, class_loader.Get());
libraries->Put(path, library);
created_library = true;
}
}
if (!created_library) {
LOG(INFO) << "WOW: we lost a race to add shared library: "
- << "\"" << path << "\" ClassLoader=" << class_loader.get();
+ << "\"" << path << "\" ClassLoader=" << class_loader.Get();
return library->CheckOnLoadResult();
}
- VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.get()
+ VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.Get()
<< "]";
bool was_successful = false;
@@ -3184,8 +3190,9 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path,
// the comments in the JNI FindClass function.)
typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
- SirtRef<mirror::ClassLoader> old_class_loader(self, self->GetClassLoaderOverride());
- self->SetClassLoaderOverride(class_loader.get());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> old_class_loader(hs.NewHandle(self->GetClassLoaderOverride()));
+ self->SetClassLoaderOverride(class_loader.Get());
int version = 0;
{
@@ -3194,7 +3201,7 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path,
version = (*jni_on_load)(this, nullptr);
}
- self->SetClassLoaderOverride(old_class_loader.get());
+ self->SetClassLoaderOverride(old_class_loader.Get());
if (version == JNI_ERR) {
StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index cdf3c47..5964947 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -46,7 +46,7 @@ union JValue;
class Libraries;
class ParsedOptions;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
class Thread;
void JniAbortF(const char* jni_function_name, const char* fmt, ...)
@@ -67,7 +67,7 @@ class JavaVMExt : public JavaVM {
* Returns 'true' on success. On failure, sets 'detail' to a
* human-readable description of the error.
*/
- bool LoadNativeLibrary(const std::string& path, const SirtRef<mirror::ClassLoader>& class_loader,
+ bool LoadNativeLibrary(const std::string& path, const Handle<mirror::ClassLoader>& class_loader,
std::string* detail)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 139e2d0..552652c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -26,7 +26,7 @@
#include "object_array.h"
#include "object_array-inl.h"
#include "object_utils.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "utils.h"
@@ -42,22 +42,25 @@ namespace mirror {
// Recursively create an array with multiple dimensions. Elements may be
// Objects or primitive types.
static Array* RecursiveCreateMultiArray(Thread* self,
- const SirtRef<Class>& array_class, int current_dimension,
- const SirtRef<mirror::IntArray>& dimensions)
+ const Handle<Class>& array_class, int current_dimension,
+ const Handle<mirror::IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
- SirtRef<Array> new_array(self, Array::Alloc<true>(self, array_class.get(), array_length,
- array_class->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
- if (UNLIKELY(new_array.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<Array> new_array(
+ hs.NewHandle(
+ Array::Alloc<true>(self, array_class.Get(), array_length, array_class->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+ if (UNLIKELY(new_array.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
if (current_dimension + 1 < dimensions->GetLength()) {
// Create a new sub-array in every element of the array.
for (int32_t i = 0; i < array_length; i++) {
- SirtRef<mirror::Class> sirt_component_type(self, array_class->GetComponentType());
- Array* sub_array = RecursiveCreateMultiArray(self, sirt_component_type,
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType()));
+ Array* sub_array = RecursiveCreateMultiArray(self, h_component_type,
current_dimension + 1, dimensions);
if (UNLIKELY(sub_array == nullptr)) {
CHECK(self->IsExceptionPending());
@@ -67,11 +70,11 @@ static Array* RecursiveCreateMultiArray(Thread* self,
new_array->AsObjectArray<Array>()->Set<false, false>(i, sub_array);
}
}
- return new_array.get();
+ return new_array.Get();
}
-Array* Array::CreateMultiArray(Thread* self, const SirtRef<Class>& element_class,
- const SirtRef<IntArray>& dimensions) {
+Array* Array::CreateMultiArray(Thread* self, const Handle<Class>& element_class,
+ const Handle<IntArray>& dimensions) {
// Verify dimensions.
//
// The caller is responsible for verifying that "dimArray" is non-null
@@ -90,15 +93,16 @@ Array* Array::CreateMultiArray(Thread* self, const SirtRef<Class>& element_class
// Find/generate the array class.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> array_class(self,
- class_linker->FindArrayClass(self, element_class.get()));
- if (UNLIKELY(array_class.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> array_class(
+ hs.NewHandle(class_linker->FindArrayClass(self, element_class.Get())));
+ if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
for (int32_t i = 1; i < dimensions->GetLength(); ++i) {
- array_class.reset(class_linker->FindArrayClass(self, array_class.get()));
- if (UNLIKELY(array_class.get() == nullptr)) {
+ array_class.Assign(class_linker->FindArrayClass(self, array_class.Get()));
+ if (UNLIKELY(array_class.Get() == nullptr)) {
CHECK(self->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index eead4eb..238506e 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -23,7 +23,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
namespace mirror {
@@ -38,8 +38,8 @@ class MANAGED Array : public Object {
bool fill_usable = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Array* CreateMultiArray(Thread* self, const SirtRef<Class>& element_class,
- const SirtRef<IntArray>& dimensions)
+ static Array* CreateMultiArray(Thread* self, const Handle<Class>& element_class,
+ const Handle<IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 8eb30f9..86c5c3f 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -29,7 +29,7 @@
namespace art {
namespace mirror {
-// TODO: get global references for these
+// TODO: Get global references for these
Class* ArtField::java_lang_reflect_ArtField_ = NULL;
ArtField* ArtField::FromReflectedField(const ScopedObjectAccess& soa, jobject jlr_field) {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 91753df..c3e2d22 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -225,10 +225,10 @@ inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
if (UNLIKELY(entry_point == GetQuickGenericJniTrampoline())) {
// Generic JNI frame.
DCHECK(IsNative());
- uint32_t sirt_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(sirt_refs);
+ uint32_t handle_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
+ size_t scope_size = HandleScope::GetAlignedHandleScopeSize(handle_refs);
QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + sirt_size,
+ return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + scope_size,
callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index eef60f7..0632a68 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -230,14 +230,15 @@ uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) {
return 0;
}
-uint32_t ArtMethod::FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
+uint32_t ArtMethod::FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
bool* has_no_move_exception) {
MethodHelper mh(this);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
// Set aside the exception while we resolve its type.
Thread* self = Thread::Current();
ThrowLocation throw_location;
- SirtRef<mirror::Throwable> exception(self, self->GetException(&throw_location));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
self->ClearException();
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
@@ -251,11 +252,11 @@ uint32_t ArtMethod::FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_
}
// Does this catch exception type apply?
Class* iter_exception_type = mh.GetClassFromTypeIdx(iter_type_idx);
- if (exception_type.get() == nullptr) {
+ if (exception_type.Get() == nullptr) {
self->ClearException();
LOG(WARNING) << "Unresolved exception class when finding catch block: "
<< mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type.get())) {
+ } else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) {
found_dex_pc = it.GetHandlerAddress();
break;
}
@@ -266,8 +267,8 @@ uint32_t ArtMethod::FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_
*has_no_move_exception = (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION);
}
// Put the exception back.
- if (exception.get() != nullptr) {
- self->SetException(throw_location, exception.get());
+ if (exception.Get() != nullptr) {
+ self->SetException(throw_location, exception.Get());
}
return found_dex_pc;
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 49d22ab..27a10be 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -332,7 +332,7 @@ class MANAGED ArtMethod : public Object {
return GetFrameSizeInBytes() - kPointerSize;
}
- size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t GetHandleScopeOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return kPointerSize;
}
@@ -381,7 +381,7 @@ class MANAGED ArtMethod : public Object {
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
- uint32_t FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
+ uint32_t FindCatchBlock(Handle<Class>& exception_type, uint32_t dex_pc,
bool* has_no_move_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index ff63782..15b69f3 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -28,7 +28,7 @@
#include "object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "throwable.h"
#include "utils.h"
@@ -77,20 +77,13 @@ void Class::SetStatus(Status new_status, Thread* self) {
<< "Attempt to set as erroneous an already erroneous class " << PrettyClass(this);
// Stash current exception.
- SirtRef<mirror::Object> old_throw_this_object(self, NULL);
- SirtRef<mirror::ArtMethod> old_throw_method(self, NULL);
- SirtRef<mirror::Throwable> old_exception(self, NULL);
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = self->GetException(&old_throw_location);
- old_throw_this_object.reset(old_throw_location.GetThis());
- old_throw_method.reset(old_throw_location.GetMethod());
- old_exception.reset(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- self->ClearException();
- }
- CHECK(old_exception.get() != NULL);
+ StackHandleScope<3> hs(self);
+ ThrowLocation old_throw_location;
+ Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException(&old_throw_location)));
+ CHECK(old_exception.Get() != nullptr);
+ Handle<mirror::Object> old_throw_this_object(hs.NewHandle(old_throw_location.GetThis()));
+ Handle<mirror::ArtMethod> old_throw_method(hs.NewHandle(old_throw_location.GetMethod()));
+ uint32_t old_throw_dex_pc = old_throw_location.GetDexPc();
// clear exception to call FindSystemClass
self->ClearException();
@@ -107,10 +100,10 @@ void Class::SetStatus(Status new_status, Thread* self) {
}
// Restore exception.
- ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
old_throw_dex_pc);
- self->SetException(gc_safe_throw_location, old_exception.get());
+ self->SetException(gc_safe_throw_location, old_exception.Get());
}
CHECK(sizeof(Status) == sizeof(uint32_t)) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
@@ -149,7 +142,8 @@ String* Class::ComputeName() {
return name;
}
Thread* self = Thread::Current();
- SirtRef<mirror::Class> sirt_c(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> handle_c(hs.NewHandle(this));
std::string descriptor(ClassHelper(this).GetDescriptor());
if ((descriptor[0] != 'L') && (descriptor[0] != '[')) {
// The descriptor indicates that this is the class for
@@ -179,7 +173,7 @@ String* Class::ComputeName() {
std::replace(descriptor.begin(), descriptor.end(), '/', '.');
name = String::AllocFromModifiedUtf8(self, descriptor.c_str());
}
- sirt_c->SetName(name);
+ handle_c->SetName(name);
return name;
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 1f393db..92b999e 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -429,7 +429,7 @@ class MANAGED Class : public Object {
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Classes and arrays vary in size, and so the object_size_ field cannot
- // be used to get their instance size
+ // be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
IsArrayClass<kVerifyFlags, kReadBarrierOption>();
}
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index fef1f9b..3d28dc6 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -23,7 +23,7 @@
#include "gc/heap.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
namespace mirror {
@@ -32,9 +32,10 @@ class DexCacheTest : public CommonRuntimeTest {};
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<DexCache> dex_cache(soa.Self(), class_linker_->AllocDexCache(soa.Self(),
- *java_lang_dex_file_));
- ASSERT_TRUE(dex_cache.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<DexCache> dex_cache(
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ ASSERT_TRUE(dex_cache.Get() != NULL);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 2f775bc..04905a5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -32,7 +32,7 @@
#include "object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "throwable.h"
#include "well_known_classes.h"
@@ -100,19 +100,19 @@ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* sr
// An allocation pre-fence visitor that copies the object.
class CopyObjectVisitor {
public:
- explicit CopyObjectVisitor(Thread* self, SirtRef<Object>* orig, size_t num_bytes)
+ explicit CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes)
: self_(self), orig_(orig), num_bytes_(num_bytes) {
}
void operator()(Object* obj, size_t usable_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(usable_size);
- CopyObject(self_, obj, orig_->get(), num_bytes_);
+ CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
private:
Thread* const self_;
- SirtRef<Object>* const orig_;
+ Handle<Object>* const orig_;
const size_t num_bytes_;
DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
};
@@ -123,7 +123,8 @@ Object* Object::Clone(Thread* self) {
// be wrong.
gc::Heap* heap = Runtime::Current()->GetHeap();
size_t num_bytes = SizeOf();
- SirtRef<Object> this_object(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<Object> this_object(hs.NewHandle(this));
Object* copy;
CopyObjectVisitor visitor(self, &this_object, num_bytes);
if (heap->IsMovableObject(this)) {
@@ -163,10 +164,11 @@ int32_t Object::IdentityHashCode() const {
case LockWord::kThinLocked: {
// Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
Thread* self = Thread::Current();
- SirtRef<mirror::Object> sirt_this(self, current_this);
- Monitor::InflateThinLocked(self, sirt_this, lw, GenerateIdentityHashCode());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_this(hs.NewHandle(current_this));
+ Monitor::InflateThinLocked(self, h_this, lw, GenerateIdentityHashCode());
// A GC may have occurred when we switched to kBlocked.
- current_this = sirt_this.get();
+ current_this = h_this.Get();
break;
}
case LockWord::kFatLocked: {
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 203a6b2..942a271 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -23,7 +23,7 @@
#include "mirror/art_field.h"
#include "mirror/class.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include <string>
@@ -118,7 +118,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* s
int32_t src_pos, int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
src->GetWithoutChecks(src_pos + i);
}
}
@@ -150,7 +150,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* s
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
GetWithoutChecks(dst_pos + i);
}
}
@@ -161,7 +161,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* sr
int32_t src_pos, int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
src->GetWithoutChecks(src_pos + i);
}
}
@@ -182,7 +182,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* sr
Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
- // The Get will perform the VerifyObject.
+ // The get will perform the VerifyObject.
GetWithoutChecks(dst_pos + i);
}
}
@@ -244,13 +244,14 @@ template<class T>
inline ObjectArray<T>* ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
DCHECK_GE(new_length, 0);
// We may get copied by a compacting GC.
- SirtRef<ObjectArray<T> > sirt_this(self, this);
+ StackHandleScope<1> hs(self);
+ Handle<ObjectArray<T> > h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
heap->GetCurrentNonMovingAllocator();
ObjectArray<T>* new_array = Alloc(self, GetClass(), new_length, allocator_type);
if (LIKELY(new_array != nullptr)) {
- new_array->AssignableMemcpy(0, sirt_this.get(), 0, std::min(sirt_this->GetLength(), new_length));
+ new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
}
return new_array;
}
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index c494f13..537fe85 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -34,7 +34,7 @@
#include "art_method-inl.h"
#include "object-inl.h"
#include "object_array-inl.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "string-inl.h"
#include "UniquePtr.h"
@@ -56,7 +56,9 @@ class ObjectTest : public CommonRuntimeTest {
}
Thread* self = Thread::Current();
- SirtRef<String> string(self, String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in));
+ StackHandleScope<1> hs(self);
+ Handle<String> string(
+ hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
ASSERT_EQ(expected_utf16_length, string->GetLength());
ASSERT_TRUE(string->GetCharArray() != NULL);
ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
@@ -102,8 +104,9 @@ TEST_F(ObjectTest, IsInSamePackage) {
TEST_F(ObjectTest, Clone) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<ObjectArray<Object> > a1(soa.Self(),
- class_linker_->AllocObjectArray<Object>(soa.Self(), 256));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ObjectArray<Object>> a1(
+ hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 256)));
size_t s1 = a1->SizeOf();
Object* clone = a1->Clone(soa.Self());
EXPECT_EQ(s1, clone->SizeOf());
@@ -112,17 +115,18 @@ TEST_F(ObjectTest, Clone) {
TEST_F(ObjectTest, AllocObjectArray) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<ObjectArray<Object> > oa(soa.Self(),
- class_linker_->AllocObjectArray<Object>(soa.Self(), 2));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ObjectArray<Object> > oa(
+ hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
EXPECT_EQ(2, oa->GetLength());
EXPECT_TRUE(oa->Get(0) == NULL);
EXPECT_TRUE(oa->Get(1) == NULL);
- oa->Set<false>(0, oa.get());
- EXPECT_TRUE(oa->Get(0) == oa.get());
+ oa->Set<false>(0, oa.Get());
+ EXPECT_TRUE(oa->Get(0) == oa.Get());
EXPECT_TRUE(oa->Get(1) == NULL);
- oa->Set<false>(1, oa.get());
- EXPECT_TRUE(oa->Get(0) == oa.get());
- EXPECT_TRUE(oa->Get(1) == oa.get());
+ oa->Set<false>(1, oa.Get());
+ EXPECT_TRUE(oa->Get(0) == oa.Get());
+ EXPECT_TRUE(oa->Get(1) == oa.Get());
Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/ArrayIndexOutOfBoundsException;");
@@ -149,20 +153,22 @@ TEST_F(ObjectTest, AllocObjectArray) {
TEST_F(ObjectTest, AllocArray) {
ScopedObjectAccess soa(Thread::Current());
Class* c = class_linker_->FindSystemClass(soa.Self(), "[I");
- SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<Array> a(
+ hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator()));
EXPECT_TRUE(c == a->GetClass());
EXPECT_EQ(1, a->GetLength());
}
@@ -170,28 +176,27 @@ TEST_F(ObjectTest, AllocArray) {
TEST_F(ObjectTest, AllocArray_FillUsable) {
ScopedObjectAccess soa(Thread::Current());
Class* c = class_linker_->FindSystemClass(soa.Self(), "[B");
- SirtRef<Array> a(soa.Self(), Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<Array> a(
+ hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true)));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(1, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[I");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator(),
- true));
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
c = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
- a.reset(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
+ a.Assign(Array::Alloc<true>(soa.Self(), c, 2, c->GetComponentSize(),
Runtime::Current()->GetHeap()->GetCurrentAllocator(), true));
EXPECT_TRUE(c == a->GetClass());
EXPECT_LE(2, a->GetLength());
@@ -273,8 +278,9 @@ TEST_F(ObjectTest, CheckAndAllocArrayFromCode) {
TEST_F(ObjectTest, CreateMultiArray) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(), "I"));
- SirtRef<IntArray> dims(soa.Self(), IntArray::Alloc(soa.Self(), 1));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<Class> c(hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "I")));
+ Handle<IntArray> dims(hs.NewHandle(IntArray::Alloc(soa.Self(), 1)));
dims->Set<false>(0, 1);
Array* multi = Array::CreateMultiArray(soa.Self(), c, dims);
EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass(soa.Self(), "[I"));
@@ -287,7 +293,7 @@ TEST_F(ObjectTest, CreateMultiArray) {
"java.lang.NegativeArraySizeException");
soa.Self()->ClearException();
- dims.reset(IntArray::Alloc(soa.Self(), 2));
+ dims.Assign(IntArray::Alloc(soa.Self(), 2));
for (int i = 1; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
dims->Set<false>(0, i);
@@ -311,7 +317,8 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
const DexFile* dex_file = Runtime::Current()->GetCompileTimeClassPath(class_loader)[0];
CHECK(dex_file != NULL);
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<ClassLoader*>(class_loader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<ClassLoader*>(class_loader)));
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
@@ -339,9 +346,9 @@ TEST_F(ObjectTest, StaticFieldFromCode) {
Object* s0 = field->GetObj(klass);
EXPECT_TRUE(s0 != NULL);
- SirtRef<CharArray> char_array(soa.Self(), CharArray::Alloc(soa.Self(), 0));
- field->SetObj<false>(field->GetDeclaringClass(), char_array.get());
- EXPECT_EQ(char_array.get(), field->GetObj(klass));
+ Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
+ field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
+ EXPECT_EQ(char_array.Get(), field->GetObj(klass));
field->SetObj<false>(field->GetDeclaringClass(), NULL);
EXPECT_EQ(NULL, field->GetObj(klass));
@@ -375,7 +382,8 @@ TEST_F(ObjectTest, String) {
TEST_F(ObjectTest, StringEqualsUtf8) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
EXPECT_TRUE(string->Equals("android"));
EXPECT_FALSE(string->Equals("Android"));
EXPECT_FALSE(string->Equals("ANDROID"));
@@ -383,46 +391,49 @@ TEST_F(ObjectTest, StringEqualsUtf8) {
EXPECT_FALSE(string->Equals("and"));
EXPECT_FALSE(string->Equals("androids"));
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
EXPECT_TRUE(empty->Equals(""));
EXPECT_FALSE(empty->Equals("a"));
}
TEST_F(ObjectTest, StringEquals) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- EXPECT_TRUE(string->Equals(string_2.get()));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_2(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ EXPECT_TRUE(string->Equals(string_2.Get()));
EXPECT_FALSE(string->Equals("Android"));
EXPECT_FALSE(string->Equals("ANDROID"));
EXPECT_FALSE(string->Equals(""));
EXPECT_FALSE(string->Equals("and"));
EXPECT_FALSE(string->Equals("androids"));
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
EXPECT_TRUE(empty->Equals(""));
EXPECT_FALSE(empty->Equals("a"));
}
TEST_F(ObjectTest, StringCompareTo) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_2(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
- SirtRef<String> string_3(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "Android"));
- SirtRef<String> string_4(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "and"));
- SirtRef<String> string_5(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
- EXPECT_EQ(0, string->CompareTo(string_2.get()));
- EXPECT_LT(0, string->CompareTo(string_3.get()));
- EXPECT_GT(0, string_3->CompareTo(string.get()));
- EXPECT_LT(0, string->CompareTo(string_4.get()));
- EXPECT_GT(0, string_4->CompareTo(string.get()));
- EXPECT_LT(0, string->CompareTo(string_5.get()));
- EXPECT_GT(0, string_5->CompareTo(string.get()));
+ StackHandleScope<5> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_2(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
+ Handle<String> string_3(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "Android")));
+ Handle<String> string_4(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "and")));
+ Handle<String> string_5(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
+ EXPECT_EQ(0, string->CompareTo(string_2.Get()));
+ EXPECT_LT(0, string->CompareTo(string_3.Get()));
+ EXPECT_GT(0, string_3->CompareTo(string.Get()));
+ EXPECT_LT(0, string->CompareTo(string_4.Get()));
+ EXPECT_GT(0, string_4->CompareTo(string.Get()));
+ EXPECT_LT(0, string->CompareTo(string_5.Get()));
+ EXPECT_GT(0, string_5->CompareTo(string.Get()));
}
TEST_F(ObjectTest, StringLength) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> string(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "android"));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
EXPECT_EQ(string->GetLength(), 7);
EXPECT_EQ(string->GetUtfLength(), 7);
@@ -440,8 +451,9 @@ TEST_F(ObjectTest, DescriptorCompare) {
jobject jclass_loader_1 = LoadDex("ProtoCompare");
jobject jclass_loader_2 = LoadDex("ProtoCompare2");
- SirtRef<ClassLoader> class_loader_1(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_1));
- SirtRef<ClassLoader> class_loader_2(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader_2));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<ClassLoader> class_loader_1(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_1)));
+ Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
ASSERT_TRUE(klass1 != NULL);
@@ -497,9 +509,10 @@ TEST_F(ObjectTest, DescriptorCompare) {
TEST_F(ObjectTest, StringHashCode) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> empty(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), ""));
- SirtRef<String> A(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "A"));
- SirtRef<String> ABC(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<String> empty(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "")));
+ Handle<String> A(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "A")));
+ Handle<String> ABC(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
EXPECT_EQ(0, empty->GetHashCode());
EXPECT_EQ(65, A->GetHashCode());
@@ -509,17 +522,18 @@ TEST_F(ObjectTest, StringHashCode) {
TEST_F(ObjectTest, InstanceOf) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
ASSERT_TRUE(X != NULL);
ASSERT_TRUE(Y != NULL);
- SirtRef<Object> x(soa.Self(), X->AllocObject(soa.Self()));
- SirtRef<Object> y(soa.Self(), Y->AllocObject(soa.Self()));
- ASSERT_TRUE(x.get() != NULL);
- ASSERT_TRUE(y.get() != NULL);
+ Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
+ Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
+ ASSERT_TRUE(x.Get() != NULL);
+ ASSERT_TRUE(y.Get() != NULL);
EXPECT_TRUE(x->InstanceOf(X));
EXPECT_FALSE(x->InstanceOf(Y));
@@ -543,7 +557,8 @@ TEST_F(ObjectTest, InstanceOf) {
TEST_F(ObjectTest, IsAssignableFrom) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
@@ -580,7 +595,8 @@ TEST_F(ObjectTest, IsAssignableFrom) {
TEST_F(ObjectTest, IsAssignableFromArray) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("XandY");
- SirtRef<ClassLoader> class_loader(soa.Self(), soa.Decode<ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
ASSERT_TRUE(X != NULL);
@@ -632,8 +648,9 @@ TEST_F(ObjectTest, IsAssignableFromArray) {
TEST_F(ObjectTest, FindInstanceField) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
- ASSERT_TRUE(s.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
+ ASSERT_TRUE(s.Get() != NULL);
Class* c = s->GetClass();
ASSERT_TRUE(c != NULL);
@@ -665,8 +682,9 @@ TEST_F(ObjectTest, FindInstanceField) {
TEST_F(ObjectTest, FindStaticField) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<String> s(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(), "ABC"));
- ASSERT_TRUE(s.get() != NULL);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
+ ASSERT_TRUE(s.Get() != NULL);
Class* c = s->GetClass();
ASSERT_TRUE(c != NULL);
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index f220039..d8591cc 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -20,7 +20,7 @@
#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "string.h"
namespace art {
@@ -40,9 +40,9 @@ void StackTraceElement::ResetClass() {
}
StackTraceElement* StackTraceElement::Alloc(Thread* self,
- SirtRef<String>& declaring_class,
- SirtRef<String>& method_name,
- SirtRef<String>& file_name,
+ Handle<String>& declaring_class,
+ Handle<String>& method_name,
+ Handle<String>& file_name,
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
@@ -57,14 +57,14 @@ StackTraceElement* StackTraceElement::Alloc(Thread* self,
}
template<bool kTransactionActive>
-void StackTraceElement::Init(SirtRef<String>& declaring_class, SirtRef<String>& method_name,
- SirtRef<String>& file_name, int32_t line_number) {
+void StackTraceElement::Init(Handle<String>& declaring_class, Handle<String>& method_name,
+ Handle<String>& file_name, int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.get());
+ declaring_class.Get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.get());
+ method_name.Get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.get());
+ file_name.Get());
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
line_number);
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index c324d96..22d9b71 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -22,7 +22,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
struct StackTraceElementOffsets;
namespace mirror {
@@ -47,9 +47,9 @@ class MANAGED StackTraceElement : public Object {
}
static StackTraceElement* Alloc(Thread* self,
- SirtRef<String>& declaring_class,
- SirtRef<String>& method_name,
- SirtRef<String>& file_name,
+ Handle<String>& declaring_class,
+ Handle<String>& method_name,
+ Handle<String>& file_name,
int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -70,8 +70,8 @@ class MANAGED StackTraceElement : public Object {
int32_t line_number_;
template<bool kTransactionActive>
- void Init(SirtRef<String>& declaring_class, SirtRef<String>& method_name,
- SirtRef<String>& file_name, int32_t line_number)
+ void Init(Handle<String>& declaring_class, Handle<String>& method_name,
+ Handle<String>& file_name, int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 6a0c225..ee719b4 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -22,7 +22,7 @@
#include "intern_table.h"
#include "object-inl.h"
#include "runtime.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "utf-inl.h"
@@ -123,18 +123,19 @@ String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
}
String* String::Alloc(Thread* self, int32_t utf16_length) {
- SirtRef<CharArray> array(self, CharArray::Alloc(self, utf16_length));
- if (UNLIKELY(array.get() == nullptr)) {
+ StackHandleScope<1> hs(self);
+ Handle<CharArray> array(hs.NewHandle(CharArray::Alloc(self, utf16_length)));
+ if (UNLIKELY(array.Get() == nullptr)) {
return nullptr;
}
return Alloc(self, array);
}
-String* String::Alloc(Thread* self, const SirtRef<CharArray>& array) {
+String* String::Alloc(Thread* self, const Handle<CharArray>& array) {
// Hold reference in case AllocObject causes GC.
String* string = down_cast<String*>(GetJavaLangString()->AllocObject(self));
if (LIKELY(string != nullptr)) {
- string->SetArray(array.get());
+ string->SetArray(array.Get());
string->SetCount(array->GetLength());
}
return string;
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index f97308e..169b671 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -24,7 +24,7 @@
namespace art {
-template<class T> class SirtRef;
+template<class T> class Handle;
struct StringClassOffsets;
struct StringOffsets;
class StringPiece;
@@ -137,7 +137,7 @@ class MANAGED String : public Object {
static String* Alloc(Thread* self, int32_t utf16_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static String* Alloc(Thread* self, const SirtRef<CharArray>& array)
+ static String* Alloc(Thread* self, const Handle<CharArray>& array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 38b77d1..822e0fb 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -632,18 +632,18 @@ void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t
}
}
-void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
+void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) {
DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
uint32_t owner_thread_id = lock_word.ThinLockOwner();
if (owner_thread_id == self->GetThreadId()) {
// We own the monitor, we can easily inflate it.
- Inflate(self, self, obj.get(), hash_code);
+ Inflate(self, self, obj.Get(), hash_code);
} else {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
ScopedThreadStateChange tsc(self, kBlocked);
- self->SetMonitorEnterObject(obj.get());
+ self->SetMonitorEnterObject(obj.Get());
if (lock_word == obj->GetLockWord(true)) { // If lock word hasn't changed.
bool timed_out;
Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
@@ -653,7 +653,7 @@ void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, Lock
if (lock_word.GetState() == LockWord::kThinLocked &&
lock_word.ThinLockOwner() == owner_thread_id) {
// Go ahead and inflate the lock.
- Inflate(self, owner, obj.get(), hash_code);
+ Inflate(self, owner, obj.Get(), hash_code);
}
thread_list->Resume(owner, false);
}
@@ -680,15 +680,16 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
- SirtRef<mirror::Object> sirt_obj(self, obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
while (true) {
- LockWord lock_word = sirt_obj->GetLockWord(true);
+ LockWord lock_word = h_obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0));
- if (sirt_obj->CasLockWord(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word, thin_locked)) {
QuasiAtomic::MembarLoadLoad();
- return sirt_obj.get(); // Success!
+ return h_obj.Get(); // Success!
}
continue; // Go again.
}
@@ -699,11 +700,11 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked, true);
- return sirt_obj.get(); // Success!
+ h_obj->SetLockWord(thin_locked, true);
+ return h_obj.Get(); // Success!
} else {
// We'd overflow the recursion count, so inflate the monitor.
- InflateThinLocked(self, sirt_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0);
}
} else {
// Contention.
@@ -713,7 +714,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
NanoSleep(1000); // Sleep for 1us and re-attempt.
} else {
contention_count = 0;
- InflateThinLocked(self, sirt_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0);
}
}
continue; // Start from the beginning.
@@ -721,15 +722,15 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
case LockWord::kFatLocked: {
Monitor* mon = lock_word.FatLockMonitor();
mon->Lock(self);
- return sirt_obj.get(); // Success!
+ return h_obj.Get(); // Success!
}
case LockWord::kHashCode:
// Inflate with the existing hashcode.
- Inflate(self, nullptr, sirt_obj.get(), lock_word.GetHashCode());
+ Inflate(self, nullptr, h_obj.Get(), lock_word.GetHashCode());
continue; // Start from the beginning.
default: {
LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
- return sirt_obj.get();
+ return h_obj.Get();
}
}
}
@@ -740,12 +741,13 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
DCHECK(obj != NULL);
obj = FakeUnlock(obj);
LockWord lock_word = obj->GetLockWord(true);
- SirtRef<mirror::Object> sirt_obj(self, obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
case LockWord::kUnlocked:
- FailedUnlock(sirt_obj.get(), self, nullptr, nullptr);
+ FailedUnlock(h_obj.Get(), self, nullptr, nullptr);
return false; // Failure.
case LockWord::kThinLocked: {
uint32_t thread_id = self->GetThreadId();
@@ -754,16 +756,16 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
// TODO: there's a race here with the owner dying while we unlock.
Thread* owner =
Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
- FailedUnlock(sirt_obj.get(), self, owner, nullptr);
+ FailedUnlock(h_obj.Get(), self, owner, nullptr);
return false; // Failure.
} else {
// We own the lock, decrease the recursion count.
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked, true);
+ h_obj->SetLockWord(thin_locked, true);
} else {
- sirt_obj->SetLockWord(LockWord(), true);
+ h_obj->SetLockWord(LockWord(), true);
}
return true; // Success!
}
@@ -946,7 +948,7 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
if (m->IsNative()) {
if (m->IsSynchronized()) {
- mirror::Object* jni_this = stack_visitor->GetCurrentSirt()->GetReference(0);
+ mirror::Object* jni_this = stack_visitor->GetCurrentHandleScope()->GetReference(0);
callback(jni_this, callback_context);
}
return;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 15620d5..bc5d2e4 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -32,7 +32,7 @@
namespace art {
class LockWord;
-template<class T> class SirtRef;
+template<class T> class Handle;
class Thread;
class StackVisitor;
typedef uint32_t MonitorId;
@@ -114,7 +114,7 @@ class Monitor {
return monitor_id_;
}
- static void InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, LockWord lock_word,
+ static void InflateThinLocked(Thread* self, Handle<mirror::Object>& obj, LockWord lock_word,
uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
static bool Deflate(Thread* self, mirror::Object* obj)
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index ed1ee7a..52abaab 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -186,7 +186,9 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
class_linker->RegisterDexFile(*dex_file);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
mirror::Class* result = class_linker->DefineClass(descriptor.c_str(), class_loader, *dex_file,
*dex_class_def);
VLOG(class_linker) << "DexFile_defineClassNative returning " << result;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 76c5866..50a8e47 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -214,7 +214,7 @@ static void PreloadDexCachesStringsCallback(mirror::Object** root, void* arg,
}
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(SirtRef<mirror::DexCache>& dex_cache, uint32_t string_idx,
+static void PreloadDexCachesResolveString(Handle<mirror::DexCache>& dex_cache, uint32_t string_idx,
StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
@@ -260,7 +260,7 @@ static void PreloadDexCachesResolveType(mirror::DexCache* dex_cache, uint32_t ty
}
// Based on ClassLinker::ResolveField.
-static void PreloadDexCachesResolveField(SirtRef<mirror::DexCache>& dex_cache,
+static void PreloadDexCachesResolveField(Handle<mirror::DexCache>& dex_cache,
uint32_t field_idx,
bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -275,9 +275,9 @@ static void PreloadDexCachesResolveField(SirtRef<mirror::DexCache>& dex_cache,
return;
}
if (is_static) {
- field = klass->FindStaticField(dex_cache.get(), field_idx);
+ field = klass->FindStaticField(dex_cache.Get(), field_idx);
} else {
- field = klass->FindInstanceField(dex_cache.get(), field_idx);
+ field = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
if (field == NULL) {
return;
@@ -287,7 +287,7 @@ static void PreloadDexCachesResolveField(SirtRef<mirror::DexCache>& dex_cache,
}
// Based on ClassLinker::ResolveMethod.
-static void PreloadDexCachesResolveMethod(SirtRef<mirror::DexCache>& dex_cache,
+static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache>& dex_cache,
uint32_t method_idx,
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -304,14 +304,14 @@ static void PreloadDexCachesResolveMethod(SirtRef<mirror::DexCache>& dex_cache,
switch (invoke_type) {
case kDirect:
case kStatic:
- method = klass->FindDirectMethod(dex_cache.get(), method_idx);
+ method = klass->FindDirectMethod(dex_cache.Get(), method_idx);
break;
case kInterface:
- method = klass->FindInterfaceMethod(dex_cache.get(), method_idx);
+ method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
break;
case kSuper:
case kVirtual:
- method = klass->FindVirtualMethod(dex_cache.get(), method_idx);
+ method = klass->FindVirtualMethod(dex_cache.Get(), method_idx);
break;
default:
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
@@ -434,7 +434,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
for (size_t i = 0; i< boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != NULL);
- SirtRef<mirror::DexCache> dex_cache(self, linker->FindDexCache(*dex_file));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
if (kPreloadDexCachesStrings) {
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
@@ -444,7 +445,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
if (kPreloadDexCachesTypes) {
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- PreloadDexCachesResolveType(dex_cache.get(), i);
+ PreloadDexCachesResolveType(dex_cache.Get(), i);
}
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 6daf9a9..b6cf7d8 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -62,12 +62,12 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
}
std::string descriptor(DotToDescriptor(name.c_str()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> c(soa.Self(), class_linker->FindClass(soa.Self(), descriptor.c_str(),
- class_loader));
- if (c.get() == nullptr) {
+ Handle<mirror::Class> c(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader)));
+ if (c.Get() == nullptr) {
ScopedLocalRef<jthrowable> cause(env, env->ExceptionOccurred());
env->ExceptionClear();
jthrowable cnfe = reinterpret_cast<jthrowable>(env->NewObject(WellKnownClasses::java_lang_ClassNotFoundException,
@@ -79,7 +79,7 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
if (initialize) {
class_linker->EnsureInitialized(c, true, true);
}
- return soa.AddLocalReference<jclass>(c.get());
+ return soa.AddLocalReference<jclass>(c.Get());
}
static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 636be5d..496a1b2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -19,12 +19,13 @@
#include <unistd.h>
#include "gc/heap.h"
+#include "handle_scope-inl.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedUtfChars.h"
-#include "sirt_ref-inl.h"
+#include "verify_object-inl.h"
namespace art {
@@ -65,8 +66,9 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
std::string detail;
{
ScopedObjectAccess soa(env);
- SirtRef<mirror::ClassLoader> classLoader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(javaLoader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> classLoader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, &detail);
if (success) {
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index a991818..7c6f2f3 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -22,21 +22,22 @@
#include "mirror/object-inl.h"
#include "object_utils.h"
#include "scoped_fast_native_object_access.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
ScopedFastNativeObjectAccess soa(env);
DCHECK(javaElementClass != NULL);
- SirtRef<mirror::Class> element_class(soa.Self(), soa.Decode<mirror::Class*>(javaElementClass));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
DCHECK(element_class->IsClass());
DCHECK(javaDimArray != NULL);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
DCHECK_STREQ(ClassHelper(dimensions_obj->GetClass()).GetDescriptor(), "[I");
- SirtRef<mirror::IntArray> dimensions_array(soa.Self(),
- down_cast<mirror::IntArray*>(dimensions_obj));
+ Handle<mirror::IntArray> dimensions_array(
+ hs.NewHandle(down_cast<mirror::IntArray*>(dimensions_obj)));
mirror::Array* new_array = mirror::Array::CreateMultiArray(soa.Self(), element_class,
dimensions_array);
return soa.AddLocalReference<jobject>(new_array);
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 2445b53..1981bfd 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,13 +38,14 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
jboolean accessible) {
ScopedFastNativeObjectAccess soa(env);
mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
- SirtRef<mirror::Class> c(soa.Self(), m->GetDeclaringClass());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/InstantiationException;",
"Can't instantiate %s %s",
c->IsInterface() ? "interface" : "abstract class",
- PrettyDescriptor(c.get()).c_str());
+ PrettyDescriptor(c.Get()).c_str());
return nullptr;
}
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index ce622d9..0d54772 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -94,13 +94,14 @@ static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa, jobject j_rcv
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
if (f->IsStatic()) {
- SirtRef<mirror::Class> sirt_klass(soa.Self(), f->GetDeclaringClass());
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_klass, true, true))) {
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> h_klass(hs.NewHandle(f->GetDeclaringClass()));
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true))) {
DCHECK(soa.Self()->IsExceptionPending());
*class_or_rcvr = nullptr;
return false;
}
- *class_or_rcvr = sirt_klass.get();
+ *class_or_rcvr = h_klass.Get();
return true;
}
@@ -271,7 +272,8 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j
const char* field_type_desciptor = fh.GetTypeDescriptor();
field_prim_type = Primitive::GetType(field_type_desciptor[0]);
if (field_prim_type == Primitive::kPrimNot) {
- SirtRef<mirror::Object> sirt_obj(soa.Self(), o);
+ StackHandleScope<1> hs(soa.Self());
+ HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&o));
// May cause resolution.
CHECK(!kMovingFields) << "Resolution may trigger thread suspension";
field_type = fh.GetType(true);
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 8477723..e5dc53c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -153,7 +153,7 @@ class OatFile {
return type_;
}
- // get the OatMethod entry based on its index into the class
+ // Get the OatMethod entry based on its index into the class
// defintion. direct methods come first, followed by virtual
// methods. note that runtime created methods such as miranda
// methods are not included.
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 504537a..0dd6ca1 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -29,7 +29,7 @@
#include "mirror/string.h"
#include "runtime.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include <string>
@@ -38,33 +38,33 @@ namespace art {
template <typename T>
class ObjectLock {
public:
- explicit ObjectLock(Thread* self, const SirtRef<T>* object)
+ explicit ObjectLock(Thread* self, const Handle<T>* object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: self_(self), obj_(object) {
CHECK(object != nullptr);
- CHECK(object->get() != nullptr);
- obj_->get()->MonitorEnter(self_);
+ CHECK(object->Get() != nullptr);
+ obj_->Get()->MonitorEnter(self_);
}
~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->MonitorExit(self_);
+ obj_->Get()->MonitorExit(self_);
}
void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Monitor::Wait(self_, obj_->get(), 0, 0, false, kWaiting);
+ Monitor::Wait(self_, obj_->Get(), 0, 0, false, kWaiting);
}
void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->Notify(self_);
+ obj_->Get()->Notify(self_);
}
void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- obj_->get()->NotifyAll(self_);
+ obj_->Get()->NotifyAll(self_);
}
private:
Thread* const self_;
- const SirtRef<T>* const obj_;
+ const Handle<T>* const obj_;
DISALLOW_COPY_AND_ASSIGN(ObjectLock);
};
@@ -378,7 +378,8 @@ class MethodHelper {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache()));
return GetClassLinker()->ResolveString(dex_file, method_id.name_idx_, dex_cache);
}
@@ -607,7 +608,8 @@ class MethodHelper {
mirror::String* ResolveString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* s = method_->GetDexCacheStrings()->Get(string_idx);
if (UNLIKELY(s == nullptr)) {
- SirtRef<mirror::DexCache> dex_cache(Thread::Current(), GetDexCache());
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache()));
s = GetClassLinker()->ResolveString(GetDexFile(), string_idx, dex_cache);
}
return s;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index eebfba8..8517e34 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -107,8 +107,9 @@ class ProxyTest : public CommonCompilerTest {
TEST_F(ProxyTest, ProxyClassHelper) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Interfaces");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
@@ -136,8 +137,9 @@ TEST_F(ProxyTest, ProxyClassHelper) {
TEST_F(ProxyTest, ProxyFieldHelper) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Interfaces");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
mirror::Class* I = class_linker_->FindClass(soa.Self(), "LInterfaces$I;", class_loader);
mirror::Class* J = class_linker_->FindClass(soa.Self(), "LInterfaces$J;", class_loader);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index aee0d64..8300195 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -20,7 +20,7 @@
#include "deoptimize_stack_visitor.h"
#include "entrypoints/entrypoint_utils.h"
#include "mirror/art_method-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -35,10 +35,11 @@ QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimizatio
void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
mirror::Throwable* exception) {
DCHECK(!is_deoptimization_);
- SirtRef<mirror::Throwable> exception_ref(self_, exception);
+ StackHandleScope<1> hs(self_);
+ Handle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
// Walk the stack to find catch handler or prepare for deoptimization.
- CatchBlockStackVisitor visitor(self_, context_, exception_ref, this);
+ CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this);
visitor.WalkStack(true);
mirror::ArtMethod* catch_method = *handler_quick_frame_;
@@ -56,13 +57,13 @@ void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
DCHECK(!self_->IsExceptionPending());
} else {
// Put exception back in root set with clear throw location.
- self_->SetException(ThrowLocation(), exception_ref.get());
+ self_->SetException(ThrowLocation(), exception_ref.Get());
}
// The debugger may suspend this thread and walk its stack. Let's do this before popping
// instrumentation frames.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
instrumentation->ExceptionCaughtEvent(self_, throw_location, catch_method, handler_dex_pc_,
- exception_ref.get());
+ exception_ref.Get());
}
void QuickExceptionHandler::DeoptimizeStack() {
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f0ba003..98310e6 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -467,11 +467,12 @@ jobject InvokeMethod(const ScopedObjectAccess& soa, jobject javaMethod,
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
- SirtRef<mirror::Class> sirt_c(soa.Self(), declaring_class);
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(sirt_c, true, true)) {
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
return nullptr;
}
- declaring_class = sirt_c.get();
+ declaring_class = h_class.Get();
}
mirror::Object* receiver = nullptr;
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index f7fc020..3b66abe 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -87,10 +87,10 @@ class ReflectionTest : public CommonCompilerTest {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> null_class_loader(self, nullptr);
- SirtRef<mirror::ClassLoader>
- class_loader(self,
- ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(
+ ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader)));
if (is_static) {
MakeExecutable(ScopedObjectAccessUnchecked(self).Decode<mirror::ClassLoader*>(jclass_loader),
class_name);
@@ -485,8 +485,9 @@ TEST_F(ReflectionTest, StaticMainMethod) {
TEST_DISABLED_FOR_PORTABLE();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
- SirtRef<mirror::ClassLoader>
- class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 99d43f4..78a93fd 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -66,7 +66,7 @@
#include "scoped_thread_state_change.h"
#include "signal_catcher.h"
#include "signal_set.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "trace.h"
@@ -331,8 +331,9 @@ jobject CreateSystemClassLoader() {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- SirtRef<mirror::Class> class_loader_class(
- soa.Self(), soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Class> class_loader_class(
+ hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
CHECK(cl->EnsureInitialized(class_loader_class, true, true));
mirror::ArtMethod* getSystemClassLoader =
@@ -340,19 +341,18 @@ jobject CreateSystemClassLoader() {
CHECK(getSystemClassLoader != NULL);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- down_cast<mirror::ClassLoader*>(result.GetL()));
- CHECK(class_loader.get() != nullptr);
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(down_cast<mirror::ClassLoader*>(result.GetL())));
+ CHECK(class_loader.Get() != nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
ScopedLocalRef<jobject> system_class_loader(env,
- soa.AddLocalReference<jobject>(class_loader.get()));
+ soa.AddLocalReference<jobject>(class_loader.Get()));
CHECK(system_class_loader.get() != nullptr);
- soa.Self()->SetClassLoaderOverride(class_loader.get());
+ soa.Self()->SetClassLoaderOverride(class_loader.Get());
- SirtRef<mirror::Class> thread_class(
- soa.Self(),
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread));
+ Handle<mirror::Class> thread_class(
+ hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
CHECK(cl->EnsureInitialized(thread_class, true, true));
mirror::ArtField* contextClassLoader =
@@ -360,7 +360,7 @@ jobject CreateSystemClassLoader() {
CHECK(contextClassLoader != NULL);
// We can't run in a transaction yet.
- contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.get());
+ contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.Get());
return env->NewGlobalRef(system_class_loader.get());
}
@@ -682,7 +682,8 @@ void Runtime::InitNativeMethods() {
std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
self->TransitionFromSuspendedToRunnable();
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<1> hs(self);
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, class_loader, &reason)) {
LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
}
@@ -944,19 +945,22 @@ void Runtime::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags
mirror::ObjectArray<mirror::ArtMethod>* Runtime::CreateDefaultImt(ClassLinker* cl) {
Thread* self = Thread::Current();
- SirtRef<mirror::ObjectArray<mirror::ArtMethod> > imtable(self, cl->AllocArtMethodArray(self, 64));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
+ hs.NewHandle(cl->AllocArtMethodArray(self, 64)));
mirror::ArtMethod* imt_conflict_method = Runtime::Current()->GetImtConflictMethod();
for (size_t i = 0; i < static_cast<size_t>(imtable->GetLength()); i++) {
imtable->Set<false>(i, imt_conflict_method);
}
- return imtable.get();
+ return imtable.Get();
}
mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for imt conflict method saves.
method->SetDexMethodIndex(DexFile::kDexNoIndex);
@@ -968,14 +972,15 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
method->SetEntryPointFromPortableCompiledCode(GetPortableImtConflictTrampoline(class_linker));
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictTrampoline(class_linker));
}
- return method.get();
+ return method.Get();
}
mirror::ArtMethod* Runtime::CreateResolutionMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for resolution method saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
@@ -987,21 +992,22 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionTrampoline(class_linker));
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionTrampoline(class_linker));
}
- return method.get();
+ return method.Get();
}
mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- SirtRef<mirror::ArtMethod> method(self, class_linker->AllocArtMethod(self));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> method(hs.NewHandle(class_linker->AllocArtMethod(self)));
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for callee saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
method->SetEntryPointFromPortableCompiledCode(nullptr);
method->SetEntryPointFromQuickCompiledCode(nullptr);
DCHECK_NE(instruction_set_, kNone);
- return method.get();
+ return method.Get();
}
void Runtime::DisallowNewSystemWeaks() {
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 7698d6a..dbd961f 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -25,7 +25,7 @@ namespace art {
// Scoped change into and out of a particular state. Handles Runnable transitions that require
// more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
-// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects,
+// ScopedObjectAccess are used to handle the change into Runnable to get direct access to objects,
// the unchecked variant doesn't aid annotalysis.
class ScopedThreadStateChange {
public:
diff --git a/runtime/sirt_ref-inl.h b/runtime/sirt_ref-inl.h
deleted file mode 100644
index 7de624a..0000000
--- a/runtime/sirt_ref-inl.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_SIRT_REF_INL_H_
-#define ART_RUNTIME_SIRT_REF_INL_H_
-
-#include "sirt_ref.h"
-
-#include "verify_object-inl.h"
-
-namespace art {
-
-template<class T> inline SirtRef<T>::SirtRef(Thread* self, T* object, bool should_verify)
- : self_(self), sirt_(object) {
- if (should_verify) {
- VerifyObject(object);
- }
- self_->PushSirt(&sirt_);
-}
-
-template<class T> inline SirtRef<T>::~SirtRef() {
- StackIndirectReferenceTable* top_sirt = self_->PopSirt();
- DCHECK_EQ(top_sirt, &sirt_);
-}
-
-template<class T> inline T* SirtRef<T>::reset(T* object, bool should_verify) {
- if (should_verify) {
- VerifyObject(object);
- }
- T* old_ref = get();
- sirt_.SetReference(0, object);
- return old_ref;
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_SIRT_REF_INL_H_
diff --git a/runtime/sirt_ref.h b/runtime/sirt_ref.h
deleted file mode 100644
index cf23891..0000000
--- a/runtime/sirt_ref.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_SIRT_REF_H_
-#define ART_RUNTIME_SIRT_REF_H_
-
-#include "base/casts.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "stack_indirect_reference_table.h"
-#include "thread.h"
-
-namespace art {
-
-template<class T>
-class SirtRef {
- public:
- SirtRef(Thread* self, T* object, bool should_verify = true);
- ~SirtRef();
-
- T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return *get();
- }
- T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return get();
- }
- T* get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return down_cast<T*>(sirt_.GetReference(0));
- }
-
- // Returns the old reference.
- T* reset(T* object = nullptr, bool should_verify = true)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- Thread* const self_;
- StackIndirectReferenceTable sirt_;
-
- DISALLOW_COPY_AND_ASSIGN(SirtRef);
-};
-
-// A version of SirtRef which disables the object verification.
-template<class T>
-class SirtRefNoVerify : public SirtRef<T> {
- public:
- SirtRefNoVerify(Thread* self, T* object) : SirtRef<T>(self, object, false) {}
- // Returns the old reference.
- T* reset(T* object = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return SirtRef<T>::reset(object, false);
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_SIRT_REF_H_
diff --git a/runtime/stack.cc b/runtime/stack.cc
index fd31ec6..e0189e9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -111,11 +111,9 @@ mirror::Object* StackVisitor::GetThisObject() const {
return NULL;
} else if (m->IsNative()) {
if (cur_quick_frame_ != NULL) {
- StackIndirectReferenceTable* sirt =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- m->GetSirtOffsetInBytes());
- return sirt->GetReference(0);
+ HandleScope* hs = reinterpret_cast<HandleScope*>(
+ reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffsetInBytes());
+ return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
@@ -277,7 +275,7 @@ void StackVisitor::SanityCheckFrame() const {
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
- // 2 words Sirt overhead
+ // 2 words HandleScope overhead
// 3+3 register spills
// TODO: this seems architecture specific for the case of JNI frames.
// TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
diff --git a/runtime/stack.h b/runtime/stack.h
index 88ef78f..963983a 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -40,7 +40,7 @@ namespace mirror {
class Context;
class ShadowFrame;
-class StackIndirectReferenceTable;
+class HandleScope;
class ScopedObjectAccess;
class Thread;
@@ -677,10 +677,10 @@ class StackVisitor {
return cur_shadow_frame_;
}
- StackIndirectReferenceTable* GetCurrentSirt() const {
+ HandleScope* GetCurrentHandleScope() const {
mirror::ArtMethod** sp = GetCurrentQuickFrame();
- ++sp; // Skip Method*; SIRT comes next;
- return reinterpret_cast<StackIndirectReferenceTable*>(sp);
+ ++sp; // Skip Method*; handle scope comes next;
+ return reinterpret_cast<HandleScope*>(sp);
}
std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
deleted file mode 100644
index 3b632e7..0000000
--- a/runtime/stack_indirect_reference_table.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
-#define ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
-
-#include "base/logging.h"
-#include "base/macros.h"
-#include "stack.h"
-#include "utils.h"
-
-namespace art {
-namespace mirror {
-class Object;
-}
-class Thread;
-
-// Stack allocated indirect reference table. It can allocated within
-// the bridge frame between managed and native code backed by stack
-// storage or manually allocated by SirtRef to hold one reference.
-class StackIndirectReferenceTable {
- public:
- explicit StackIndirectReferenceTable(mirror::Object* object) :
- link_(NULL), number_of_references_(1) {
- references_[0].Assign(object);
- }
-
- ~StackIndirectReferenceTable() {}
-
- // Number of references contained within this SIRT.
- uint32_t NumberOfReferences() const {
- return number_of_references_;
- }
-
- // We have versions with and without explicit pointer size of the following. The first two are
- // used at runtime, so OFFSETOF_MEMBER computes the right offsets automatically. The last one
- // takes the pointer size explicitly so that at compile time we can cross-compile correctly.
-
- // Returns the size of a StackIndirectReferenceTable containing num_references sirts.
- static size_t SizeOf(uint32_t num_references) {
- size_t header_size = OFFSETOF_MEMBER(StackIndirectReferenceTable, references_);
- size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- return header_size + data_size;
- }
-
- // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedSirtSize(uint32_t num_references) {
- size_t sirt_size = SizeOf(num_references);
- return RoundUp(sirt_size, 8);
- }
-
- // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
- static size_t GetAlignedSirtSizeTarget(size_t pointer_size, uint32_t num_references) {
- // Assume that the layout is packed.
- size_t header_size = pointer_size + sizeof(number_of_references_);
- // This assumes there is no layout change between 32 and 64b.
- size_t data_size = sizeof(StackReference<mirror::Object>) * num_references;
- size_t sirt_size = header_size + data_size;
- return RoundUp(sirt_size, 8);
- }
-
- // Link to previous SIRT or NULL.
- StackIndirectReferenceTable* GetLink() const {
- return link_;
- }
-
- void SetLink(StackIndirectReferenceTable* sirt) {
- DCHECK_NE(this, sirt);
- link_ = sirt;
- }
-
- // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
- // not resize anything.
- void SetNumberOfReferences(uint32_t num_references) {
- number_of_references_ = num_references;
- }
-
- mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- return references_[i].AsMirrorPtr();
- }
-
- StackReference<mirror::Object>* GetStackReference(size_t i)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- return &references_[i];
- }
-
- void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_LT(i, number_of_references_);
- references_[i].Assign(object);
- }
-
- bool Contains(StackReference<mirror::Object>* sirt_entry) const {
- // A SIRT should always contain something. One created by the
- // jni_compiler should have a jobject/jclass as a native method is
- // passed in a this pointer or a class
- DCHECK_GT(number_of_references_, 0U);
- return ((&references_[0] <= sirt_entry)
- && (sirt_entry <= (&references_[number_of_references_ - 1])));
- }
-
- // Offset of link within SIRT, used by generated code
- static size_t LinkOffset(size_t pointer_size) {
- return 0;
- }
-
- // Offset of length within SIRT, used by generated code
- static size_t NumberOfReferencesOffset(size_t pointer_size) {
- return pointer_size;
- }
-
- // Offset of link within SIRT, used by generated code
- static size_t ReferencesOffset(size_t pointer_size) {
- return pointer_size + sizeof(number_of_references_);
- }
-
- private:
- StackIndirectReferenceTable() {}
-
- StackIndirectReferenceTable* link_;
- uint32_t number_of_references_;
-
- // number_of_references_ are available if this is allocated and filled in by jni_compiler.
- StackReference<mirror::Object> references_[1];
-
- DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_STACK_INDIRECT_REFERENCE_TABLE_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7ed0cb4..d535118 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -44,6 +44,7 @@
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
+#include "handle_scope.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
@@ -61,9 +62,8 @@
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include "stack.h"
-#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
@@ -157,11 +157,7 @@ void* Thread::CreateCallback(void* arg) {
self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
self->tlsPtr_.jpeer = nullptr;
-
- {
- SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
- self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
- }
+ self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
Dbg::PostThreadStart(self);
// Invoke the 'run' method of our java.lang.Thread.
@@ -431,8 +427,9 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
reinterpret_cast<jlong>(self));
ScopedObjectAccess soa(self);
- SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
- if (peer_thread_name.get() == nullptr) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
+ if (peer_thread_name.Get() == nullptr) {
// The Thread constructor should have set the Thread.name to a
// non-null value. However, because we can run without code
// available (in the compiler, in tests), we manually assign the
@@ -442,10 +439,10 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
} else {
InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
}
- peer_thread_name.reset(GetThreadName(soa));
+ peer_thread_name.Assign(GetThreadName(soa));
}
// 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
- if (peer_thread_name.get() != nullptr) {
+ if (peer_thread_name.Get() != nullptr) {
SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
}
}
@@ -951,8 +948,7 @@ void Thread::DumpStack(std::ostream& os) const {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- SirtRef<mirror::ArtMethod> method_ref(Thread::Current(), GetCurrentMethod(nullptr));
- DumpNativeStack(os, GetTid(), " native: ", method_ref.get());
+ DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr));
}
DumpJavaStack(os);
} else {
@@ -1107,8 +1103,9 @@ void Thread::Destroy() {
soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
// (This conditional is only needed for tests, where Thread.lock won't have been set.)
if (lock != nullptr) {
- SirtRef<mirror::Object> sirt_obj(self, lock);
- ObjectLock<mirror::Object> locker(self, &sirt_obj);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> h_obj(hs.NewHandle(lock));
+ ObjectLock<mirror::Object> locker(self, &h_obj);
locker.NotifyAll();
}
}
@@ -1208,28 +1205,28 @@ void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
}
}
-size_t Thread::NumSirtReferences() {
+size_t Thread::NumHandleReferences() {
size_t count = 0;
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
count += cur->NumberOfReferences();
}
return count;
}
-bool Thread::SirtContains(jobject obj) const {
- StackReference<mirror::Object>* sirt_entry =
+bool Thread::HandleScopeContains(jobject obj) const {
+ StackReference<mirror::Object>* hs_entry =
reinterpret_cast<StackReference<mirror::Object>*>(obj);
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
- if (cur->Contains(sirt_entry)) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
+ if (cur->Contains(hs_entry)) {
return true;
}
}
- // JNI code invoked from portable code uses shadow frames rather than the SIRT.
- return tlsPtr_.managed_stack.ShadowFramesContain(sirt_entry);
+ // JNI code invoked from portable code uses shadow frames rather than the handle scope.
+ return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
}
-void Thread::SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
- for (StackIndirectReferenceTable* cur = tlsPtr_.top_sirt; cur; cur = cur->GetLink()) {
+void Thread::HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
+ for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
size_t num_refs = cur->NumberOfReferences();
for (size_t j = 0; j < num_refs; ++j) {
mirror::Object* object = cur->GetReference(j);
@@ -1256,11 +1253,11 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
if (kind == kLocal) {
IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
result = locals.Get(ref);
- } else if (kind == kSirtOrInvalid) {
+ } else if (kind == kHandleScopeOrInvalid) {
// TODO: make stack indirect reference table lookup more efficient.
- // Check if this is a local reference in the SIRT.
- if (LIKELY(SirtContains(obj))) {
- // Read from SIRT.
+ // Check if this is a local reference in the handle scope.
+ if (LIKELY(HandleScopeContains(obj))) {
+ // Read from handle scope.
result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
} else {
@@ -1369,11 +1366,11 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
bool Init(int depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Allocate method trace with an extra slot that will hold the PC trace
- SirtRef<mirror::ObjectArray<mirror::Object> >
- method_trace(self_,
- Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
- depth + 1));
- if (method_trace.get() == nullptr) {
+ StackHandleScope<1> hs(self_);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::ObjectArray<mirror::Object>> method_trace(
+ hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
+ if (method_trace.Get() == nullptr) {
return false;
}
mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
@@ -1388,7 +1385,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
const char* last_no_suspend_cause =
self_->StartAssertNoThreadSuspension("Building internal stack trace");
CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
- method_trace_ = method_trace.get();
+ method_trace_ = method_trace.Get();
dex_pc_trace_ = dex_pc_trace;
return true;
}
@@ -1498,11 +1495,12 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(const ScopedObje
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
MethodHelper mh(method);
int32_t line_number;
- SirtRef<mirror::String> class_name_object(soa.Self(), nullptr);
- SirtRef<mirror::String> source_name_object(soa.Self(), nullptr);
+ StackHandleScope<3> hs(soa.Self());
+ auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
+ auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
if (method->IsProxyMethod()) {
line_number = -1;
- class_name_object.reset(method->GetDeclaringClass()->GetName());
+ class_name_object.Assign(method->GetDeclaringClass()->GetName());
// source_name_object intentionally left null for proxy methods
} else {
mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
@@ -1513,24 +1511,23 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(const ScopedObje
const char* descriptor = mh.GetDeclaringClassDescriptor();
CHECK(descriptor != nullptr);
std::string class_name(PrettyDescriptor(descriptor));
- class_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
- if (class_name_object.get() == nullptr) {
+ class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
+ if (class_name_object.Get() == nullptr) {
return nullptr;
}
const char* source_file = mh.GetDeclaringClassSourceFile();
if (source_file != nullptr) {
- source_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
- if (source_name_object.get() == nullptr) {
+ source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
+ if (source_name_object.Get() == nullptr) {
return nullptr;
}
}
}
const char* method_name = mh.GetName();
CHECK(method_name != nullptr);
- SirtRef<mirror::String> method_name_object(soa.Self(),
- mirror::String::AllocFromModifiedUtf8(soa.Self(),
- method_name));
- if (method_name_object.get() == nullptr) {
+ Handle<mirror::String> method_name_object(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
+ if (method_name_object.Get() == nullptr) {
return nullptr;
}
mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
@@ -1573,23 +1570,24 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
const char* msg) {
DCHECK_EQ(this, Thread::Current());
ScopedObjectAccessUnchecked soa(this);
+ StackHandleScope<5> hs(soa.Self());
// Ensure we don't forget arguments over object allocation.
- SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
- SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
+ Handle<mirror::Object> saved_throw_this(hs.NewHandle(throw_location.GetThis()));
+ Handle<mirror::ArtMethod> saved_throw_method(hs.NewHandle(throw_location.GetMethod()));
// Ignore the cause throw location. TODO: should we report this as a re-throw?
ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
ClearException();
Runtime* runtime = Runtime::Current();
mirror::ClassLoader* cl = nullptr;
- if (saved_throw_method.get() != nullptr) {
- cl = saved_throw_method.get()->GetDeclaringClass()->GetClassLoader();
- }
- SirtRef<mirror::ClassLoader> class_loader(this, cl);
- SirtRef<mirror::Class>
- exception_class(this, runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
- class_loader));
- if (UNLIKELY(exception_class.get() == nullptr)) {
+ if (saved_throw_method.Get() != nullptr) {
+ cl = saved_throw_method.Get()->GetDeclaringClass()->GetClassLoader();
+ }
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(cl));
+ Handle<mirror::Class> exception_class(
+ hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
+ class_loader)));
+ if (UNLIKELY(exception_class.Get() == nullptr)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
return;
@@ -1600,12 +1598,12 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
return;
}
DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
- SirtRef<mirror::Throwable> exception(this,
- down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
+ Handle<mirror::Throwable> exception(
+ hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
- if (exception.get() == nullptr) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ if (exception.Get() == nullptr) {
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
return;
@@ -1657,9 +1655,9 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
if (trace.get() != nullptr) {
exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
}
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.get());
+ SetException(gc_safe_throw_location, exception.Get());
} else {
jvalue jv_args[2];
size_t i = 0;
@@ -1672,11 +1670,11 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
jv_args[i].l = cause.get();
++i;
}
- InvokeWithJValues(soa, exception.get(), soa.EncodeMethod(exception_init_method), jv_args);
+ InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
+ ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.get());
+ SetException(gc_safe_throw_location, exception.Get());
}
}
}
@@ -1733,7 +1731,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
DO_THREAD_OFFSET(TopOfManagedStackPcOffset<ptr_size>(), "top_quick_frame_pc")
DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
- DO_THREAD_OFFSET(TopSirtOffset<ptr_size>(), "top_sirt")
+ DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
#undef DO_THREAD_OFFSET
@@ -1967,7 +1965,7 @@ class ReferenceMapVisitor : public StackVisitor {
mirror::ArtMethod* m = shadow_frame->GetMethod();
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // SIRT for JNI or References for interpreter.
+ // Handle scope for JNI or References for interpreter.
for (size_t reg = 0; reg < num_regs; ++reg) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
if (ref != nullptr) {
@@ -2105,7 +2103,7 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
}
tlsPtr_.jni_env->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
tlsPtr_.jni_env->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
- SirtVisitRoots(visitor, arg, thread_id);
+ HandleScopeVisitRoots(visitor, arg, thread_id);
if (tlsPtr_.debug_invoke_req != nullptr) {
tlsPtr_.debug_invoke_req->VisitRoots(visitor, arg, thread_id, kRootDebugger);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 32311e1..1bbe617 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -31,12 +31,12 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/allocator/rosalloc.h"
#include "globals.h"
+#include "handle_scope.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
-#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "throw_location.h"
#include "UniquePtr.h"
@@ -648,35 +648,40 @@ class Thread {
return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
}
- // Number of references in SIRTs on this thread.
- size_t NumSirtReferences();
+ // Number of references in handle scope on this thread.
+ size_t NumHandleReferences();
- // Number of references allocated in SIRTs & JNI shadow frames on this thread.
+ // Number of references allocated in handle scopes & JNI shadow frames on this thread.
size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return NumSirtReferences() + NumJniShadowFrameReferences();
+ return NumHandleReferences() + NumJniShadowFrameReferences();
};
// Is the given obj in this thread's stack indirect reference table?
- bool SirtContains(jobject obj) const;
+ bool HandleScopeContains(jobject obj) const;
- void SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
+ void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void PushSirt(StackIndirectReferenceTable* sirt) {
- sirt->SetLink(tlsPtr_.top_sirt);
- tlsPtr_.top_sirt = sirt;
+ HandleScope* GetTopHandleScope() {
+ return tlsPtr_.top_handle_scope;
}
- StackIndirectReferenceTable* PopSirt() {
- StackIndirectReferenceTable* sirt = tlsPtr_.top_sirt;
- DCHECK(sirt != NULL);
- tlsPtr_.top_sirt = tlsPtr_.top_sirt->GetLink();
- return sirt;
+ void PushHandleScope(HandleScope* handle_scope) {
+ handle_scope->SetLink(tlsPtr_.top_handle_scope);
+ tlsPtr_.top_handle_scope = handle_scope;
+ }
+
+ HandleScope* PopHandleScope() {
+ HandleScope* handle_scope = tlsPtr_.top_handle_scope;
+ DCHECK(handle_scope != nullptr);
+ tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
+ return handle_scope;
}
template<size_t pointer_size>
- static ThreadOffset<pointer_size> TopSirtOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, top_sirt));
+ static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ top_handle_scope));
}
DebugInvokeReq* GetInvokeReq() const {
@@ -950,7 +955,7 @@ class Thread {
managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
- top_sirt(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
+ top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
deoptimization_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
@@ -1006,8 +1011,8 @@ class Thread {
// If we're blocked in MonitorEnter, this is the object we're trying to lock.
mirror::Object* monitor_enter_object;
- // Top of linked list of stack indirect reference tables or NULL for none.
- StackIndirectReferenceTable* top_sirt;
+ // Top of linked list of handle scopes or nullptr for none.
+ HandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
// useful for testing.
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b8735a3..23bf294 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -96,7 +96,7 @@ class ThreadPool {
void SetMaxActiveWorkers(size_t threads);
protected:
- // Get a task to run, blocks if there are no tasks left
+ // Get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self);
// Try to get a task, returning NULL if there is none available.
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 1dc2da0..3645ed2 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -27,56 +27,57 @@ class TransactionTest : public CommonRuntimeTest {};
TEST_F(TransactionTest, Object_class) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- EXPECT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ EXPECT_EQ(h_obj->GetClass(), h_klass.Get());
}
TEST_F(TransactionTest, Object_monitor) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Lock object's monitor outside the transaction.
- sirt_obj->MonitorEnter(soa.Self());
- uint32_t old_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ h_obj->MonitorEnter(soa.Self());
+ uint32_t old_lock_word = h_obj->GetLockWord(false).GetValue();
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
// Unlock object's monitor inside the transaction.
- sirt_obj->MonitorExit(soa.Self());
- uint32_t new_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ h_obj->MonitorExit(soa.Self());
+ uint32_t new_lock_word = h_obj->GetLockWord(false).GetValue();
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- uint32_t aborted_lock_word = sirt_obj->GetLockWord(false).GetValue();
+ uint32_t aborted_lock_word = h_obj->GetLockWord(false).GetValue();
EXPECT_NE(old_lock_word, new_lock_word);
EXPECT_EQ(aborted_lock_word, new_lock_word);
}
TEST_F(TransactionTest, Array_length) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "[Ljava/lang/Object;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
constexpr int32_t kArraySize = 2;
@@ -84,304 +85,301 @@ TEST_F(TransactionTest, Array_length) {
Runtime::Current()->EnterTransactionMode(&transaction);
// Allocate an array during transaction.
- SirtRef<mirror::Array> sirt_obj(soa.Self(),
- mirror::Array::Alloc<true>(soa.Self(), sirt_klass.get(),
- kArraySize,
- sirt_klass->GetComponentSize(),
- Runtime::Current()->GetHeap()->GetCurrentAllocator()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Array> h_obj(
+ hs.NewHandle(
+ mirror::Array::Alloc<true>(soa.Self(), h_klass.Get(), kArraySize,
+ h_klass->GetComponentSize(),
+ Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- EXPECT_EQ(sirt_obj->GetLength(), kArraySize);
+ EXPECT_EQ(h_obj->GetLength(), kArraySize);
}
TEST_F(TransactionTest, StaticFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Lookup fields.
- mirror::ArtField* booleanField = sirt_klass->FindDeclaredStaticField("booleanField", "Z");
+ mirror::ArtField* booleanField = h_klass->FindDeclaredStaticField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(FieldHelper(booleanField).GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
- ASSERT_EQ(booleanField->GetBoolean(sirt_klass.get()), false);
+ ASSERT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
- mirror::ArtField* byteField = sirt_klass->FindDeclaredStaticField("byteField", "B");
+ mirror::ArtField* byteField = h_klass->FindDeclaredStaticField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(FieldHelper(byteField).GetTypeAsPrimitiveType(), Primitive::kPrimByte);
- ASSERT_EQ(byteField->GetByte(sirt_klass.get()), 0);
+ ASSERT_EQ(byteField->GetByte(h_klass.Get()), 0);
- mirror::ArtField* charField = sirt_klass->FindDeclaredStaticField("charField", "C");
+ mirror::ArtField* charField = h_klass->FindDeclaredStaticField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(FieldHelper(charField).GetTypeAsPrimitiveType(), Primitive::kPrimChar);
- ASSERT_EQ(charField->GetChar(sirt_klass.get()), 0u);
+ ASSERT_EQ(charField->GetChar(h_klass.Get()), 0u);
- mirror::ArtField* shortField = sirt_klass->FindDeclaredStaticField("shortField", "S");
+ mirror::ArtField* shortField = h_klass->FindDeclaredStaticField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(FieldHelper(shortField).GetTypeAsPrimitiveType(), Primitive::kPrimShort);
- ASSERT_EQ(shortField->GetShort(sirt_klass.get()), 0);
+ ASSERT_EQ(shortField->GetShort(h_klass.Get()), 0);
- mirror::ArtField* intField = sirt_klass->FindDeclaredStaticField("intField", "I");
+ mirror::ArtField* intField = h_klass->FindDeclaredStaticField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(FieldHelper(intField).GetTypeAsPrimitiveType(), Primitive::kPrimInt);
- ASSERT_EQ(intField->GetInt(sirt_klass.get()), 0);
+ ASSERT_EQ(intField->GetInt(h_klass.Get()), 0);
- mirror::ArtField* longField = sirt_klass->FindDeclaredStaticField("longField", "J");
+ mirror::ArtField* longField = h_klass->FindDeclaredStaticField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(FieldHelper(longField).GetTypeAsPrimitiveType(), Primitive::kPrimLong);
- ASSERT_EQ(longField->GetLong(sirt_klass.get()), static_cast<int64_t>(0));
+ ASSERT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = sirt_klass->FindDeclaredStaticField("floatField", "F");
+ mirror::ArtField* floatField = h_klass->FindDeclaredStaticField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(FieldHelper(floatField).GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(sirt_klass.get()), static_cast<float>(0.0f));
+ ASSERT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = sirt_klass->FindDeclaredStaticField("doubleField", "D");
+ mirror::ArtField* doubleField = h_klass->FindDeclaredStaticField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(FieldHelper(doubleField).GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(sirt_klass.get()), static_cast<double>(0.0));
+ ASSERT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = sirt_klass->FindDeclaredStaticField("objectField",
+ mirror::ArtField* objectField = h_klass->FindDeclaredStaticField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(FieldHelper(objectField).GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- ASSERT_EQ(objectField->GetObject(sirt_klass.get()), nullptr);
+ ASSERT_EQ(objectField->GetObject(h_klass.Get()), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- booleanField->SetBoolean<true>(sirt_klass.get(), true);
- byteField->SetByte<true>(sirt_klass.get(), 1);
- charField->SetChar<true>(sirt_klass.get(), 1u);
- shortField->SetShort<true>(sirt_klass.get(), 1);
- intField->SetInt<true>(sirt_klass.get(), 1);
- longField->SetLong<true>(sirt_klass.get(), 1);
- floatField->SetFloat<true>(sirt_klass.get(), 1.0);
- doubleField->SetDouble<true>(sirt_klass.get(), 1.0);
- objectField->SetObject<true>(sirt_klass.get(), sirt_obj.get());
+ booleanField->SetBoolean<true>(h_klass.Get(), true);
+ byteField->SetByte<true>(h_klass.Get(), 1);
+ charField->SetChar<true>(h_klass.Get(), 1u);
+ shortField->SetShort<true>(h_klass.Get(), 1);
+ intField->SetInt<true>(h_klass.Get(), 1);
+ longField->SetLong<true>(h_klass.Get(), 1);
+ floatField->SetFloat<true>(h_klass.Get(), 1.0);
+ doubleField->SetDouble<true>(h_klass.Get(), 1.0);
+ objectField->SetObject<true>(h_klass.Get(), h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
// Check values have properly been restored to their original (default) value.
- EXPECT_EQ(booleanField->GetBoolean(sirt_klass.get()), false);
- EXPECT_EQ(byteField->GetByte(sirt_klass.get()), 0);
- EXPECT_EQ(charField->GetChar(sirt_klass.get()), 0u);
- EXPECT_EQ(shortField->GetShort(sirt_klass.get()), 0);
- EXPECT_EQ(intField->GetInt(sirt_klass.get()), 0);
- EXPECT_EQ(longField->GetLong(sirt_klass.get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(sirt_klass.get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(sirt_klass.get()), static_cast<double>(0.0));
- EXPECT_EQ(objectField->GetObject(sirt_klass.get()), nullptr);
+ EXPECT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
+ EXPECT_EQ(byteField->GetByte(h_klass.Get()), 0);
+ EXPECT_EQ(charField->GetChar(h_klass.Get()), 0u);
+ EXPECT_EQ(shortField->GetShort(h_klass.Get()), 0);
+ EXPECT_EQ(intField->GetInt(h_klass.Get()), 0);
+ EXPECT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
+ EXPECT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
+ EXPECT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
+ EXPECT_EQ(objectField->GetObject(h_klass.Get()), nullptr);
}
TEST_F(TransactionTest, InstanceFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Allocate an InstanceFieldTest object.
- SirtRef<mirror::Object> sirt_instance(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_instance.get() != nullptr);
+ Handle<mirror::Object> h_instance(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_instance.Get() != nullptr);
// Lookup fields.
- mirror::ArtField* booleanField = sirt_klass->FindDeclaredInstanceField("booleanField", "Z");
+ mirror::ArtField* booleanField = h_klass->FindDeclaredInstanceField("booleanField", "Z");
ASSERT_TRUE(booleanField != nullptr);
ASSERT_EQ(FieldHelper(booleanField).GetTypeAsPrimitiveType(), Primitive::kPrimBoolean);
- ASSERT_EQ(booleanField->GetBoolean(sirt_instance.get()), false);
+ ASSERT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
- mirror::ArtField* byteField = sirt_klass->FindDeclaredInstanceField("byteField", "B");
+ mirror::ArtField* byteField = h_klass->FindDeclaredInstanceField("byteField", "B");
ASSERT_TRUE(byteField != nullptr);
ASSERT_EQ(FieldHelper(byteField).GetTypeAsPrimitiveType(), Primitive::kPrimByte);
- ASSERT_EQ(byteField->GetByte(sirt_instance.get()), 0);
+ ASSERT_EQ(byteField->GetByte(h_instance.Get()), 0);
- mirror::ArtField* charField = sirt_klass->FindDeclaredInstanceField("charField", "C");
+ mirror::ArtField* charField = h_klass->FindDeclaredInstanceField("charField", "C");
ASSERT_TRUE(charField != nullptr);
ASSERT_EQ(FieldHelper(charField).GetTypeAsPrimitiveType(), Primitive::kPrimChar);
- ASSERT_EQ(charField->GetChar(sirt_instance.get()), 0u);
+ ASSERT_EQ(charField->GetChar(h_instance.Get()), 0u);
- mirror::ArtField* shortField = sirt_klass->FindDeclaredInstanceField("shortField", "S");
+ mirror::ArtField* shortField = h_klass->FindDeclaredInstanceField("shortField", "S");
ASSERT_TRUE(shortField != nullptr);
ASSERT_EQ(FieldHelper(shortField).GetTypeAsPrimitiveType(), Primitive::kPrimShort);
- ASSERT_EQ(shortField->GetShort(sirt_instance.get()), 0);
+ ASSERT_EQ(shortField->GetShort(h_instance.Get()), 0);
- mirror::ArtField* intField = sirt_klass->FindDeclaredInstanceField("intField", "I");
+ mirror::ArtField* intField = h_klass->FindDeclaredInstanceField("intField", "I");
ASSERT_TRUE(intField != nullptr);
ASSERT_EQ(FieldHelper(intField).GetTypeAsPrimitiveType(), Primitive::kPrimInt);
- ASSERT_EQ(intField->GetInt(sirt_instance.get()), 0);
+ ASSERT_EQ(intField->GetInt(h_instance.Get()), 0);
- mirror::ArtField* longField = sirt_klass->FindDeclaredInstanceField("longField", "J");
+ mirror::ArtField* longField = h_klass->FindDeclaredInstanceField("longField", "J");
ASSERT_TRUE(longField != nullptr);
ASSERT_EQ(FieldHelper(longField).GetTypeAsPrimitiveType(), Primitive::kPrimLong);
- ASSERT_EQ(longField->GetLong(sirt_instance.get()), static_cast<int64_t>(0));
+ ASSERT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
- mirror::ArtField* floatField = sirt_klass->FindDeclaredInstanceField("floatField", "F");
+ mirror::ArtField* floatField = h_klass->FindDeclaredInstanceField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(FieldHelper(floatField).GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(sirt_instance.get()), static_cast<float>(0.0f));
+ ASSERT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
- mirror::ArtField* doubleField = sirt_klass->FindDeclaredInstanceField("doubleField", "D");
+ mirror::ArtField* doubleField = h_klass->FindDeclaredInstanceField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(FieldHelper(doubleField).GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(sirt_instance.get()), static_cast<double>(0.0));
+ ASSERT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
- mirror::ArtField* objectField = sirt_klass->FindDeclaredInstanceField("objectField",
+ mirror::ArtField* objectField = h_klass->FindDeclaredInstanceField("objectField",
"Ljava/lang/Object;");
ASSERT_TRUE(objectField != nullptr);
ASSERT_EQ(FieldHelper(objectField).GetTypeAsPrimitiveType(), Primitive::kPrimNot);
- ASSERT_EQ(objectField->GetObject(sirt_instance.get()), nullptr);
+ ASSERT_EQ(objectField->GetObject(h_instance.Get()), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- booleanField->SetBoolean<true>(sirt_instance.get(), true);
- byteField->SetByte<true>(sirt_instance.get(), 1);
- charField->SetChar<true>(sirt_instance.get(), 1u);
- shortField->SetShort<true>(sirt_instance.get(), 1);
- intField->SetInt<true>(sirt_instance.get(), 1);
- longField->SetLong<true>(sirt_instance.get(), 1);
- floatField->SetFloat<true>(sirt_instance.get(), 1.0);
- doubleField->SetDouble<true>(sirt_instance.get(), 1.0);
- objectField->SetObject<true>(sirt_instance.get(), sirt_obj.get());
+ booleanField->SetBoolean<true>(h_instance.Get(), true);
+ byteField->SetByte<true>(h_instance.Get(), 1);
+ charField->SetChar<true>(h_instance.Get(), 1u);
+ shortField->SetShort<true>(h_instance.Get(), 1);
+ intField->SetInt<true>(h_instance.Get(), 1);
+ longField->SetLong<true>(h_instance.Get(), 1);
+ floatField->SetFloat<true>(h_instance.Get(), 1.0);
+ doubleField->SetDouble<true>(h_instance.Get(), 1.0);
+ objectField->SetObject<true>(h_instance.Get(), h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
// Check values have properly been restored to their original (default) value.
- EXPECT_EQ(booleanField->GetBoolean(sirt_instance.get()), false);
- EXPECT_EQ(byteField->GetByte(sirt_instance.get()), 0);
- EXPECT_EQ(charField->GetChar(sirt_instance.get()), 0u);
- EXPECT_EQ(shortField->GetShort(sirt_instance.get()), 0);
- EXPECT_EQ(intField->GetInt(sirt_instance.get()), 0);
- EXPECT_EQ(longField->GetLong(sirt_instance.get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(sirt_instance.get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(sirt_instance.get()), static_cast<double>(0.0));
- EXPECT_EQ(objectField->GetObject(sirt_instance.get()), nullptr);
+ EXPECT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
+ EXPECT_EQ(byteField->GetByte(h_instance.Get()), 0);
+ EXPECT_EQ(charField->GetChar(h_instance.Get()), 0u);
+ EXPECT_EQ(shortField->GetShort(h_instance.Get()), 0);
+ EXPECT_EQ(intField->GetInt(h_instance.Get()), 0);
+ EXPECT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
+ EXPECT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
+ EXPECT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
+ EXPECT_EQ(objectField->GetObject(h_instance.Get()), nullptr);
}
TEST_F(TransactionTest, StaticArrayFieldsTest) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
- ASSERT_TRUE(sirt_klass->IsInitialized());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->EnsureInitialized(h_klass, true, true);
+ ASSERT_TRUE(h_klass->IsInitialized());
// Lookup fields.
- mirror::ArtField* booleanArrayField = sirt_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
+ mirror::ArtField* booleanArrayField = h_klass->FindDeclaredStaticField("booleanArrayField", "[Z");
ASSERT_TRUE(booleanArrayField != nullptr);
- mirror::BooleanArray* booleanArray = booleanArrayField->GetObject(sirt_klass.get())->AsBooleanArray();
+ mirror::BooleanArray* booleanArray = booleanArrayField->GetObject(h_klass.Get())->AsBooleanArray();
ASSERT_TRUE(booleanArray != nullptr);
ASSERT_EQ(booleanArray->GetLength(), 1);
ASSERT_EQ(booleanArray->GetWithoutChecks(0), false);
- mirror::ArtField* byteArrayField = sirt_klass->FindDeclaredStaticField("byteArrayField", "[B");
+ mirror::ArtField* byteArrayField = h_klass->FindDeclaredStaticField("byteArrayField", "[B");
ASSERT_TRUE(byteArrayField != nullptr);
- mirror::ByteArray* byteArray = byteArrayField->GetObject(sirt_klass.get())->AsByteArray();
+ mirror::ByteArray* byteArray = byteArrayField->GetObject(h_klass.Get())->AsByteArray();
ASSERT_TRUE(byteArray != nullptr);
ASSERT_EQ(byteArray->GetLength(), 1);
ASSERT_EQ(byteArray->GetWithoutChecks(0), 0);
- mirror::ArtField* charArrayField = sirt_klass->FindDeclaredStaticField("charArrayField", "[C");
+ mirror::ArtField* charArrayField = h_klass->FindDeclaredStaticField("charArrayField", "[C");
ASSERT_TRUE(charArrayField != nullptr);
- mirror::CharArray* charArray = charArrayField->GetObject(sirt_klass.get())->AsCharArray();
+ mirror::CharArray* charArray = charArrayField->GetObject(h_klass.Get())->AsCharArray();
ASSERT_TRUE(charArray != nullptr);
ASSERT_EQ(charArray->GetLength(), 1);
ASSERT_EQ(charArray->GetWithoutChecks(0), 0u);
- mirror::ArtField* shortArrayField = sirt_klass->FindDeclaredStaticField("shortArrayField", "[S");
+ mirror::ArtField* shortArrayField = h_klass->FindDeclaredStaticField("shortArrayField", "[S");
ASSERT_TRUE(shortArrayField != nullptr);
- mirror::ShortArray* shortArray = shortArrayField->GetObject(sirt_klass.get())->AsShortArray();
+ mirror::ShortArray* shortArray = shortArrayField->GetObject(h_klass.Get())->AsShortArray();
ASSERT_TRUE(shortArray != nullptr);
ASSERT_EQ(shortArray->GetLength(), 1);
ASSERT_EQ(shortArray->GetWithoutChecks(0), 0);
- mirror::ArtField* intArrayField = sirt_klass->FindDeclaredStaticField("intArrayField", "[I");
+ mirror::ArtField* intArrayField = h_klass->FindDeclaredStaticField("intArrayField", "[I");
ASSERT_TRUE(intArrayField != nullptr);
- mirror::IntArray* intArray = intArrayField->GetObject(sirt_klass.get())->AsIntArray();
+ mirror::IntArray* intArray = intArrayField->GetObject(h_klass.Get())->AsIntArray();
ASSERT_TRUE(intArray != nullptr);
ASSERT_EQ(intArray->GetLength(), 1);
ASSERT_EQ(intArray->GetWithoutChecks(0), 0);
- mirror::ArtField* longArrayField = sirt_klass->FindDeclaredStaticField("longArrayField", "[J");
+ mirror::ArtField* longArrayField = h_klass->FindDeclaredStaticField("longArrayField", "[J");
ASSERT_TRUE(longArrayField != nullptr);
- mirror::LongArray* longArray = longArrayField->GetObject(sirt_klass.get())->AsLongArray();
+ mirror::LongArray* longArray = longArrayField->GetObject(h_klass.Get())->AsLongArray();
ASSERT_TRUE(longArray != nullptr);
ASSERT_EQ(longArray->GetLength(), 1);
ASSERT_EQ(longArray->GetWithoutChecks(0), static_cast<int64_t>(0));
- mirror::ArtField* floatArrayField = sirt_klass->FindDeclaredStaticField("floatArrayField", "[F");
+ mirror::ArtField* floatArrayField = h_klass->FindDeclaredStaticField("floatArrayField", "[F");
ASSERT_TRUE(floatArrayField != nullptr);
- mirror::FloatArray* floatArray = floatArrayField->GetObject(sirt_klass.get())->AsFloatArray();
+ mirror::FloatArray* floatArray = floatArrayField->GetObject(h_klass.Get())->AsFloatArray();
ASSERT_TRUE(floatArray != nullptr);
ASSERT_EQ(floatArray->GetLength(), 1);
ASSERT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
- mirror::ArtField* doubleArrayField = sirt_klass->FindDeclaredStaticField("doubleArrayField", "[D");
+ mirror::ArtField* doubleArrayField = h_klass->FindDeclaredStaticField("doubleArrayField", "[D");
ASSERT_TRUE(doubleArrayField != nullptr);
- mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(sirt_klass.get())->AsDoubleArray();
+ mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(h_klass.Get())->AsDoubleArray();
ASSERT_TRUE(doubleArray != nullptr);
ASSERT_EQ(doubleArray->GetLength(), 1);
ASSERT_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
- mirror::ArtField* objectArrayField = sirt_klass->FindDeclaredStaticField("objectArrayField",
+ mirror::ArtField* objectArrayField = h_klass->FindDeclaredStaticField("objectArrayField",
"[Ljava/lang/Object;");
ASSERT_TRUE(objectArrayField != nullptr);
mirror::ObjectArray<mirror::Object>* objectArray =
- objectArrayField->GetObject(sirt_klass.get())->AsObjectArray<mirror::Object>();
+ objectArrayField->GetObject(h_klass.Get())->AsObjectArray<mirror::Object>();
ASSERT_TRUE(objectArray != nullptr);
ASSERT_EQ(objectArray->GetLength(), 1);
ASSERT_EQ(objectArray->GetWithoutChecks(0), nullptr);
// Create a java.lang.Object instance to set objectField.
- SirtRef<mirror::Class> object_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/Object;"));
- ASSERT_TRUE(object_klass.get() != nullptr);
- SirtRef<mirror::Object> sirt_obj(soa.Self(), sirt_klass->AllocObject(soa.Self()));
- ASSERT_TRUE(sirt_obj.get() != nullptr);
- ASSERT_EQ(sirt_obj->GetClass(), sirt_klass.get());
+ Handle<mirror::Class> object_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ ASSERT_TRUE(object_klass.Get() != nullptr);
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
+ ASSERT_TRUE(h_obj.Get() != nullptr);
+ ASSERT_EQ(h_obj->GetClass(), h_klass.Get());
// Modify fields inside transaction and abort it.
Transaction transaction;
@@ -394,7 +392,7 @@ TEST_F(TransactionTest, StaticArrayFieldsTest) {
longArray->SetWithoutChecks<true>(0, 1);
floatArray->SetWithoutChecks<true>(0, 1.0);
doubleArray->SetWithoutChecks<true>(0, 1.0);
- objectArray->SetWithoutChecks<true>(0, sirt_obj.get());
+ objectArray->SetWithoutChecks<true>(0, h_obj.Get());
Runtime::Current()->ExitTransactionMode();
transaction.Abort();
@@ -412,42 +410,41 @@ TEST_F(TransactionTest, StaticArrayFieldsTest) {
TEST_F(TransactionTest, EmptyClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
-
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(),
- "LTransaction$EmptyStatic;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
+
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$EmptyStatic;", class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_FALSE(soa.Self()->IsExceptionPending());
}
TEST_F(TransactionTest, StaticFieldClass) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction")));
- ASSERT_TRUE(class_loader.get() != nullptr);
-
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindClass(soa.Self(),
- "LTransaction$StaticFieldClass;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Transaction"))));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
+
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$StaticFieldClass;",
+ class_loader)));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_FALSE(soa.Self()->IsExceptionPending());
}
@@ -455,39 +452,40 @@ TEST_F(TransactionTest, StaticFieldClass) {
TEST_F(TransactionTest, BlacklistedClass) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Transaction");
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
- ASSERT_TRUE(class_loader.get() != nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ ASSERT_TRUE(class_loader.Get() != nullptr);
// Load and verify java.lang.ExceptionInInitializerError and java.lang.InternalError which will
// be thrown by class initialization due to native call.
- SirtRef<mirror::Class> sirt_klass(soa.Self(),
- class_linker_->FindSystemClass(soa.Self(),
- "Ljava/lang/ExceptionInInitializerError;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
- sirt_klass.reset(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ Handle<mirror::Class> h_klass(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/ExceptionInInitializerError;")));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
+ h_klass.Assign(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
// Load and verify Transaction$NativeSupport used in class initialization.
- sirt_klass.reset(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
-
- sirt_klass.reset(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
- class_loader));
- ASSERT_TRUE(sirt_klass.get() != nullptr);
- class_linker_->VerifyClass(sirt_klass);
- ASSERT_TRUE(sirt_klass->IsVerified());
+ h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
+ class_loader));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
+
+ h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
+ class_loader));
+ ASSERT_TRUE(h_klass.Get() != nullptr);
+ class_linker_->VerifyClass(h_klass);
+ ASSERT_TRUE(h_klass->IsVerified());
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
- class_linker_->EnsureInitialized(sirt_klass, true, true);
+ class_linker_->EnsureInitialized(h_klass, true, true);
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(soa.Self()->IsExceptionPending());
}
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index d425620..8a8834d 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -23,7 +23,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
#include <valgrind.h>
@@ -95,11 +95,12 @@ TEST_F(UtilsTest, PrettyTypeOf) {
ScopedObjectAccess soa(Thread::Current());
EXPECT_EQ("null", PrettyTypeOf(NULL));
- SirtRef<mirror::String> s(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
- EXPECT_EQ("java.lang.String", PrettyTypeOf(s.get()));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
+ EXPECT_EQ("java.lang.String", PrettyTypeOf(s.Get()));
- SirtRef<mirror::ShortArray> a(soa.Self(), mirror::ShortArray::Alloc(soa.Self(), 2));
- EXPECT_EQ("short[]", PrettyTypeOf(a.get()));
+ Handle<mirror::ShortArray> a(hs.NewHandle(mirror::ShortArray::Alloc(soa.Self(), 2)));
+ EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
ASSERT_TRUE(c != NULL);
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index c554394..62ecf4b 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -21,7 +21,7 @@
#include "method_verifier.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
namespace verifier {
@@ -39,11 +39,11 @@ inline const InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index)
}
inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
- return class_loader_->get();
+ return class_loader_->Get();
}
inline mirror::DexCache* MethodVerifier::GetDexCache() {
- return dex_cache_->get();
+ return dex_cache_->Get();
}
inline MethodReference MethodVerifier::GetMethodReference() const {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 41ff96e..9dd366d 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -40,7 +40,7 @@
#include "register_line-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/dex_gc_map.h"
namespace art {
@@ -115,15 +115,15 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(mirror::Class* klass,
}
return kHardFailure;
}
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, kh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, klass->GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(kh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
return VerifyClass(&dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
}
MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures,
std::string* error) {
@@ -233,8 +233,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
@@ -278,8 +278,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method,
@@ -292,8 +292,8 @@ void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_i
verifier.Dump(os);
}
-MethodVerifier::MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader,
+MethodVerifier::MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
mirror::ArtMethod* method, uint32_t method_access_flags,
@@ -332,9 +332,9 @@ MethodVerifier::~MethodVerifier() {
void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>& monitor_enter_dex_pcs) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
true);
@@ -357,9 +357,9 @@ void MethodVerifier::FindLocksAtDexPc() {
mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
true);
@@ -388,9 +388,9 @@ mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
uint32_t dex_pc) {
MethodHelper mh(m);
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(mh.GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(mh.GetClassLoader()));
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
true);
@@ -1834,7 +1834,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
<< array_type;
} else {
const RegType& component_type = reg_types_.GetComponentType(array_type,
- class_loader_->get());
+ class_loader_->Get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
@@ -2149,7 +2149,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- return_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ return_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(*return_type);
@@ -2216,7 +2216,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(),
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2242,7 +2242,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2300,7 +2300,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
descriptor = MethodHelper(abs_method).GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2566,7 +2566,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = MethodHelper(called_method).GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
+ const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2837,7 +2837,7 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
- : reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ : reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
if (result.IsConflict()) {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
<< "' in " << referrer;
@@ -3093,7 +3093,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (reg_type.IsIntegralTypes()) {
const RegType& src_type = work_line_->GetRegisterType(get_reg);
@@ -3218,7 +3218,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3262,7 +3262,7 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->get());
+ const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3304,7 +3304,7 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3381,7 +3381,7 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->get());
+ const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3523,7 +3523,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -3547,7 +3547,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
<< " to be compatible with type '" << insn_type
<< "' but found type '" << *field_type
- << "' in get-object";
+ << "' in Get-object";
work_line_->SetRegisterType(vregA, reg_types_.Conflict());
return;
}
@@ -3590,7 +3590,7 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
if (field_type == nullptr) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
- field_type = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
DCHECK(field_type != nullptr);
const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -3666,7 +3666,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
// compile time
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
<< " to be of type '" << insn_type
- << "' but found type '" << *field_type << "' in get";
+ << "' but found type '" << *field_type << "' in Get";
return;
}
} else {
@@ -3842,7 +3842,7 @@ const RegType& MethodVerifier::GetMethodReturnType() {
const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
- return_type_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ return_type_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
}
return *return_type_;
@@ -3858,7 +3858,7 @@ const RegType& MethodVerifier::GetDeclaringClass() {
declaring_class_ = &reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes());
} else {
- declaring_class_ = &reg_types_.FromDescriptor(class_loader_->get(), descriptor, false);
+ declaring_class_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
}
}
return *declaring_class_;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 5f13191..cea2403 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -38,7 +38,7 @@
namespace art {
struct ReferenceMap2Visitor;
-template<class T> class SirtRef;
+template<class T> class Handle;
namespace verifier {
@@ -142,15 +142,15 @@ class MethodVerifier {
/* Verify a class. Returns "kNoFailure" on success. */
static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static FailureKind VerifyClass(const DexFile* dex_file, SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ static FailureKind VerifyClass(const DexFile* dex_file, Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
bool allow_soft_failures, std::string* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags)
@@ -205,8 +205,8 @@ class MethodVerifier {
return can_load_classes_;
}
- MethodVerifier(const DexFile* dex_file, SirtRef<mirror::DexCache>* dex_cache,
- SirtRef<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
+ MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
const DexFile::CodeItem* code_item, uint32_t method_idx, mirror::ArtMethod* method,
uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -255,8 +255,8 @@ class MethodVerifier {
* for code flow problems.
*/
static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
- SirtRef<mirror::DexCache>& dex_cache,
- SirtRef<mirror::ClassLoader>& class_loader,
+ Handle<mirror::DexCache>& dex_cache,
+ Handle<mirror::ClassLoader>& class_loader,
const DexFile::ClassDef* class_def_idx,
const DexFile::CodeItem* code_item,
mirror::ArtMethod* method, uint32_t method_access_flags,
@@ -347,7 +347,7 @@ class MethodVerifier {
/* Ensure that the wide register index is valid for this code item. */
bool CheckWideRegisterIndex(uint32_t idx);
- // Perform static checks on a field get or set instruction. All we do here is ensure that the
+ // Perform static checks on a field get or set instruction. All we do here is ensure that the
// field index is in the valid range.
bool CheckFieldIndex(uint32_t idx);
@@ -633,9 +633,9 @@ class MethodVerifier {
const RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
- SirtRef<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+ Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
- SirtRef<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
+ Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 111e867..689a33e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -142,15 +142,16 @@ mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassL
// Try resolving class
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> class_loader(self, loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader));
mirror::Class* klass = NULL;
if (can_load_classes_) {
klass = class_linker->FindClass(self, descriptor, class_loader);
} else {
klass = class_linker->LookupClass(descriptor, loader);
- if (klass != NULL && !klass->IsLoaded()) {
+ if (klass != nullptr && !klass->IsLoaded()) {
// We found the class but without it being loaded its not safe for use.
- klass = NULL;
+ klass = nullptr;
}
}
return klass;
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 31b0113..a3e3e3b 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -94,7 +94,7 @@ const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
return verifier_->GetRegTypeCache()->Conflict();
}
- /* get the element type of the array held in vsrc */
+ /* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
const RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {