Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.h                           10
-rw-r--r--  compiler/dex/mir_field_info.cc                            32
-rw-r--r--  compiler/dex/mir_method_info.cc                           21
-rw-r--r--  compiler/driver/compiler_driver-inl.h                     37
-rw-r--r--  compiler/driver/compiler_driver.cc                       153
-rw-r--r--  compiler/driver/compiler_driver.h                         18
-rw-r--r--  compiler/driver/compiler_driver_test.cc                    9
-rw-r--r--  compiler/elf_writer_mclinker.cc                            4
-rw-r--r--  compiler/image_writer.cc                                  75
-rw-r--r--  compiler/jni/jni_compiler_test.cc                         14
-rw-r--r--  compiler/jni/portable/jni_compiler.cc                     38
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.cc           6
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc       6
-rw-r--r--  compiler/jni/quick/calling_convention.cc                  14
-rw-r--r--  compiler/jni/quick/calling_convention.h                   33
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                        90
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc         6
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.cc           6
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc     6
-rw-r--r--  compiler/oat_test.cc                                       3
-rw-r--r--  compiler/oat_writer.cc                                     7
-rw-r--r--  compiler/utils/arm/assembler_arm.cc                       32
-rw-r--r--  compiler/utils/arm/assembler_arm.h                        14
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc                   30
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h                    14
-rw-r--r--  compiler/utils/assembler.h                                14
-rw-r--r--  compiler/utils/mips/assembler_mips.cc                     34
-rw-r--r--  compiler/utils/mips/assembler_mips.h                      14
-rw-r--r--  compiler/utils/x86/assembler_x86.cc                       22
-rw-r--r--  compiler/utils/x86/assembler_x86.h                        14
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc                 24
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h                  14
32 files changed, 423 insertions, 391 deletions
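
The pattern repeated across every file below is mechanical: each `SirtRef<T>(self, obj)` local root becomes a `Handle<T>` carved out of a single `StackHandleScope<N>` declared once per region, and reads go through `Get()` instead of `get()`. A minimal self-contained model of the new API is sketched here; the class and method names follow the patch, but the thread registration and GC-visit plumbing that the real `StackHandleScope` performs are stubbed out, so this is an illustration, not ART's implementation:

#include <cassert>
#include <cstddef>

struct Thread {};  // stand-in for art::Thread

// Handle<T>: a slot-backed reference, read through Get().
template <typename T>
class Handle {
 public:
  explicit Handle(T** slot) : slot_(slot) {}
  T* Get() const { return *slot_; }
  T* operator->() const { return *slot_; }
 private:
  T** slot_;  // points into the owning StackHandleScope
};

// StackHandleScope<N>: a fixed-capacity, stack-allocated block of slots.
// The real class links itself into the thread's scope chain so the GC can
// visit and update every slot; that bookkeeping is omitted here.
template <size_t kCapacity>
class StackHandleScope {
 public:
  explicit StackHandleScope(Thread* self) : self_(self), pos_(0u) {}
  template <typename T>
  Handle<T> NewHandle(T* object) {
    assert(pos_ < kCapacity);  // capacity must cover every NewHandle() call
    slots_[pos_] = object;
    return Handle<T>(reinterpret_cast<T**>(&slots_[pos_++]));
  }
 private:
  Thread* const self_;
  void* slots_[kCapacity];
  size_t pos_;
};

// Usage mirroring the call sites in this patch:
struct ClassLoader {};  // stand-in for mirror::ClassLoader
void Example(Thread* self, ClassLoader* loader_ptr) {
  StackHandleScope<1> hs(self);
  Handle<ClassLoader> loader(hs.NewHandle(loader_ptr));
  assert(loader.Get() == loader_ptr);
}
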
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 8f39212..586c442 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -257,7 +257,8 @@ class CommonCompilerTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -352,7 +353,8 @@ class CommonCompilerTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
Thread* self = Thread::Current();
- SirtRef<mirror::ClassLoader> loader(self, class_loader);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
@@ -372,7 +374,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
timings.EndSplit();
}
- void CompileDirectMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileDirectMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
@@ -385,7 +387,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
CompileMethod(method);
}
- void CompileVirtualMethod(SirtRef<mirror::ClassLoader>& class_loader, const char* class_name,
+ void CompileVirtualMethod(Handle<mirror::ClassLoader>& class_loader, const char* class_name,
const char* method_name, const char* signature)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string class_descriptor(DotToDescriptor(class_name));
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 7c630e8..98866d9 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -21,10 +21,10 @@
#include "base/logging.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -43,11 +43,12 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -63,7 +64,7 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
bool is_volatile = compiler_driver->IsFieldVolatile(resolved_field);
std::pair<bool, bool> fast_path = compiler_driver->IsFastInstanceField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_);
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_);
it->flags_ = 0u | // Without kFlagIsStatic.
(is_volatile ? kFlagIsVolatile : 0u) |
(fast_path.first ? kFlagFastGet : 0u) |
@@ -89,11 +90,12 @@ void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve fields and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve fields and record all available info.
@@ -110,7 +112,7 @@ void MirSFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
bool is_referrers_class, is_initialized;
std::pair<bool, bool> fast_path = compiler_driver->IsFastStaticField(
- dex_cache.get(), referrer_class.get(), resolved_field, field_idx, &it->field_offset_,
+ dex_cache.Get(), referrer_class.Get(), resolved_field, field_idx, &it->field_offset_,
&it->storage_index_, &is_referrers_class, &is_initialized);
it->flags_ = kFlagIsStatic |
(is_volatile ? kFlagIsVolatile : 0u) |
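
Note how the scope's template argument tracks the handle count: the two `Resolve()` bodies above each create exactly three handles (dex cache, class loader, referrer class), hence `StackHandleScope<3>`. The capacity is a hard limit checked at runtime. A standalone illustration of the invariant, using toy types rather than ART's:

#include <cassert>
#include <cstddef>

// Toy fixed-capacity scope; enforces the same invariant ART checks.
template <size_t kCapacity>
struct FixedScope {
  void* slots[kCapacity];
  size_t pos = 0u;
  void** NewSlot(void* v) {
    assert(pos < kCapacity);  // one slot per NewHandle()-style call
    slots[pos] = v;
    return &slots[pos++];
  }
};

int main() {
  FixedScope<3> hs;      // capacity 3, matching the three handles above
  hs.NewSlot(nullptr);   // dex_cache
  hs.NewSlot(nullptr);   // class_loader
  hs.NewSlot(nullptr);   // referrer_class
  // A fourth NewSlot() would fire the assert.
  return 0;
}
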
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 2c33ef1..cc2bd95 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -19,10 +19,10 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_driver-inl.h"
-#include "mirror/class_loader.h" // Only to allow casts in SirtRef<ClassLoader>.
-#include "mirror/dex_cache.h" // Only to allow casts in SirtRef<DexCache>.
+#include "mirror/class_loader.h" // Only to allow casts in Handle<ClassLoader>.
+#include "mirror/dex_cache.h" // Only to allow casts in Handle<DexCache>.
#include "scoped_thread_state_change.h"
-#include "sirt_ref.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -45,11 +45,12 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
// We're going to resolve methods and check access in a tight loop. It's better to hold
// the lock and needed references once than re-acquiring them again and again.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), compiler_driver->GetDexCache(mUnit));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- compiler_driver->GetClassLoader(soa, mUnit));
- SirtRef<mirror::Class> referrer_class(soa.Self(),
- compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(
+ compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
// Even if the referrer class is unresolved (i.e. we're compiling a method without class
// definition) we still want to resolve methods and record all available info.
@@ -73,10 +74,10 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
int fast_path_flags = compiler_driver->IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, referrer_class.get(), resolved_method, &invoke_type,
+ soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
&target_method, devirt_target, &it->direct_code_, &it->direct_method_);
bool needs_clinit =
- compiler_driver->NeedsClassInitialization(referrer_class.get(), resolved_method);
+ compiler_driver->NeedsClassInitialization(referrer_class.Get(), resolved_method);
uint16_t other_flags = it->flags_ &
~(kFlagFastPath | kFlagNeedsClassInitialization | (kInvokeTypeMask << kBitSharpTypeBegin));
it->flags_ = other_flags |
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index d9f2a3a..08fd386 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -28,7 +28,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -42,10 +42,10 @@ inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& s
}
inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit) {
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
const DexFile::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
mirror::Class* referrer_class = mUnit->GetClassLinker()->ResolveType(
@@ -59,11 +59,11 @@ inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
}
inline mirror::ArtField* CompilerDriver::ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static) {
- DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
+ DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
*mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
@@ -165,11 +165,11 @@ inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
}
inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type) {
DCHECK(dex_cache->GetDexFile() == mUnit->GetDexFile());
- DCHECK(class_loader.get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ DCHECK(class_loader.Get() == soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
*mUnit->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
DCHECK_EQ(resolved_method == nullptr, soa.Self()->IsExceptionPending());
@@ -206,8 +206,8 @@ inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
}
inline int CompilerDriver::IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method) {
@@ -217,7 +217,7 @@ inline int CompilerDriver::IsFastInvoke(
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_class, resolved_method,
- dex_cache.get(),
+ dex_cache.Get(),
target_method->dex_method_index))) {
return 0;
}
@@ -237,7 +237,7 @@ inline int CompilerDriver::IsFastInvoke(
// Sharpen a virtual call into a direct call. The method_idx is into referrer's
// dex cache, check that this resolved method is where we expect it.
CHECK(target_method->dex_file == mUnit->GetDexFile());
- DCHECK(dex_cache.get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+ DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
resolved_method) << PrettyMethod(resolved_method);
int stats_flags = kFlagMethodResolved;
@@ -259,8 +259,9 @@ inline int CompilerDriver::IsFastInvoke(
devirt_target->dex_method_index,
dex_cache, class_loader, NULL, kVirtual);
} else {
- SirtRef<mirror::DexCache> target_dex_cache(soa.Self(),
- class_linker->FindDexCache(*devirt_target->dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> target_dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*devirt_target->dex_file)));
called_method = class_linker->ResolveMethod(*devirt_target->dex_file,
devirt_target->dex_method_index,
target_dex_cache, class_loader, NULL, kVirtual);
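
Two of the hunks above also tighten `DCHECK(a == b)` into `DCHECK_EQ(a, b)`. The practical difference in glog-style macros like ART's is that the `_EQ` form evaluates both operands and can report their values on failure, instead of only the text of the expression. A simplified sketch of that behavior (toy macros restricted to integral operands, not ART's implementation):

#include <cstdio>
#include <cstdlib>

// Simplified stand-ins for ART's glog-style macros; illustrative only.
#define DCHECK(cond) \
  do { \
    if (!(cond)) { \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort(); \
    } \
  } while (0)

#define DCHECK_EQ(a, b) \
  do { \
    long va = (a), vb = (b); \
    if (va != vb) { \
      std::fprintf(stderr, "Check failed: %s == %s (%ld vs %ld)\n", #a, #b, va, vb); \
      std::abort(); \
    } \
  } while (0)

int main() {
  int resolved = 1, expected = 1;
  DCHECK(resolved == expected);   // a failure reports only the expression text
  DCHECK_EQ(resolved, expected);  // a failure reports the expression and both values
  return 0;
}
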
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 6817f14..547b9f7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -49,7 +49,7 @@
#include "mirror/throwable.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "thread.h"
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
@@ -509,7 +509,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
}
static DexToDexCompilationLevel GetDexToDexCompilationlevel(
- Thread* self, SirtRef<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
+ Thread* self, Handle<mirror::ClassLoader>& class_loader, const DexFile& dex_file,
const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -524,7 +524,7 @@ static DexToDexCompilationLevel GetDexToDexCompilationlevel(
// function). Since image classes can be verified again while compiling an application,
// we must prevent the DEX-to-DEX compiler from introducing them.
// TODO: find a way to enable "quick" instructions for image classes and remove this check.
- bool compiling_image_classes = class_loader.get() == nullptr;
+ bool compiling_image_classes = class_loader.Get() == nullptr;
if (compiling_image_classes) {
return kRequired;
} else if (klass->IsVerified()) {
@@ -574,8 +574,9 @@ void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings
{
ScopedObjectAccess soa(Thread::Current());
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
class_def);
}
@@ -700,8 +701,10 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
const std::string& descriptor(*it);
- SirtRef<mirror::Class> klass(self, class_linker->FindSystemClass(self, descriptor.c_str()));
- if (klass.get() == NULL) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
+ if (klass.Get() == NULL) {
VLOG(compiler) << "Failed to find class " << descriptor;
image_classes_->erase(it++);
self->ClearException();
@@ -714,8 +717,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
// exceptions are resolved by the verifier when there is a catch block in an interested method.
// Do this here so that exception classes appear to have been specified image classes.
std::set<std::pair<uint16_t, const DexFile*> > unresolved_exception_types;
- SirtRef<mirror::Class> java_lang_Throwable(self,
- class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> java_lang_Throwable(
+ hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
do {
unresolved_exception_types.clear();
class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor,
@@ -723,16 +727,17 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings)
for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*dex_file));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
- SirtRef<mirror::Class> klass(self, class_linker->ResolveType(*dex_file, exception_type_idx,
- dex_cache, class_loader));
- if (klass.get() == NULL) {
+ StackHandleScope<3> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+ Handle<mirror::Class> klass(hs.NewHandle(
+ class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, class_loader)));
+ if (klass.Get() == NULL) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
}
- DCHECK(java_lang_Throwable->IsAssignableFrom(klass.get()));
+ DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get()));
}
// Resolving exceptions may load classes that reference more exceptions, iterate until no
// more are found
@@ -816,7 +821,9 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file,
if (IsImage()) {
// We resolve all const-string strings when building for the image.
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), Runtime::Current()->GetClassLinker()->FindDexCache(dex_file));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(dex_file)));
Runtime::Current()->GetClassLinker()->ResolveString(dex_file, string_idx, dex_cache);
result = true;
}
@@ -980,16 +987,17 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, false));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1017,16 +1025,17 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- SirtRef<mirror::DexCache> dex_cache_sirt(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader_sirt(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
- SirtRef<mirror::ArtField> resolved_field_sirt(soa.Self(),
- ResolveField(soa, dex_cache_sirt, class_loader_sirt, mUnit, field_idx, true));
- referrer_class = (resolved_field_sirt.get() != nullptr)
- ? ResolveCompilingMethodsClass(soa, dex_cache_sirt, class_loader_sirt, mUnit) : nullptr;
- resolved_field = resolved_field_sirt.get();
- dex_cache = dex_cache_sirt.get();
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache_handle(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader_handle(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field_handle(hs.NewHandle(
+ ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true)));
+ referrer_class = (resolved_field_handle.Get() != nullptr)
+ ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
+ resolved_field = resolved_field_handle.Get();
+ dex_cache = dex_cache_handle.Get();
}
bool result = false;
if (resolved_field != nullptr && referrer_class != nullptr) {
@@ -1168,17 +1177,18 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// Try to resolve the method and compiling method's class.
mirror::ArtMethod* resolved_method;
mirror::Class* referrer_class;
- SirtRef<mirror::DexCache> dex_cache(soa.Self(),
- mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
{
uint32_t method_idx = target_method->dex_method_index;
- SirtRef<mirror::ArtMethod> resolved_method_sirt(soa.Self(),
- ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type));
- referrer_class = (resolved_method_sirt.get() != nullptr)
+ Handle<mirror::ArtMethod> resolved_method_handle(hs.NewHandle(
+ ResolveMethod(soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type)));
+ referrer_class = (resolved_method_handle.Get() != nullptr)
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
- resolved_method = resolved_method_sirt.get();
+ resolved_method = resolved_method_handle.Get();
}
bool result = false;
if (resolved_method != nullptr) {
@@ -1196,7 +1206,7 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
// Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
if (UNLIKELY(referrer_class == nullptr) ||
UNLIKELY(!referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
- resolved_method, dex_cache.get(),
+ resolved_method, dex_cache.Get(),
target_method->dex_method_index)) ||
*invoke_type == kSuper) {
// Slow path. (Without devirtualization, all super calls go slow path as well.)
@@ -1469,8 +1479,10 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
if (!SkipClass(class_linker, jclass_loader, dex_file, class_def)) {
ScopedObjectAccess soa(self);
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
// Resolve the class.
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
class_loader);
@@ -1556,9 +1568,10 @@ static void ResolveType(const ParallelCompilationManager* manager, size_t type_i
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = manager->GetClassLinker();
const DexFile& dex_file = *manager->GetDexFile();
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()));
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == NULL) {
@@ -1611,11 +1624,12 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager->GetClassLinker();
jobject jclass_loader = manager->GetClassLoader();
- SirtRef<mirror::ClassLoader> class_loader(
- soa.Self(), soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(), class_linker->FindClass(soa.Self(), descriptor,
- class_loader));
- if (klass.get() == nullptr) {
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+ if (klass.Get() == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
@@ -1624,7 +1638,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
* This is to ensure the class is structurally sound for compilation. An unsound class
* will be rejected by the verifier and later skipped during compilation in the compiler.
*/
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), class_linker->FindDexCache(dex_file));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
std::string error_msg;
if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
&error_msg) ==
@@ -1632,8 +1646,8 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
}
- } else if (!SkipClass(jclass_loader, dex_file, klass.get())) {
- CHECK(klass->IsResolved()) << PrettyClass(klass.get());
+ } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
+ CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
class_linker->VerifyClass(klass);
if (klass->IsErroneous()) {
@@ -1643,7 +1657,7 @@ static void VerifyClass(const ParallelCompilationManager* manager, size_t class_
}
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
- << PrettyDescriptor(klass.get()) << ": state=" << klass->GetStatus();
+ << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus();
}
soa.Self()->AssertNoPendingException();
}
@@ -1666,13 +1680,13 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
- SirtRef<mirror::Class> klass(soa.Self(),
- manager->GetClassLinker()->FindClass(soa.Self(), descriptor,
- class_loader));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> klass(
+ hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass.get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.get())) {
+ if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) {
// Only try to initialize classes that were successfully verified.
if (klass->IsVerified()) {
// Attempt to initialize the class but bail if we either need to initialize the super-class
@@ -1687,8 +1701,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
// parent-to-child and a child-to-parent lock ordering and consequent potential deadlock.
// We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
// than use a special Object for the purpose we use the Class of java.lang.Class.
- SirtRef<mirror::Class> sirt_klass(soa.Self(), klass->GetClass());
- ObjectLock<mirror::Class> lock(soa.Self(), &sirt_klass);
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass()));
+ ObjectLock<mirror::Class> lock(soa.Self(), &h_klass);
// Attempt to initialize allowing initialization of parent classes but still not static
// fields.
manager->GetClassLinker()->EnsureInitialized(klass, false, true);
@@ -1803,8 +1817,9 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz
DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(jclass_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
dex_to_dex_compilation_level = GetDexToDexCompilationlevel(soa.Self(), class_loader, dex_file,
class_def);
}
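
The `ObjectLock` hunk above works around a subtle ordering problem: initializing a class while holding that class's own monitor could interleave parent-to-child with child-to-parent acquisition across threads and deadlock, so the code locks one distinguished shared object (the `Class` of `java.lang.Class`) instead. The general shape of that workaround, with toy mutexes standing in for object monitors:

#include <mutex>

// Instead of locking parent and child class monitors (whose acquisition
// order differs between initialization paths), every initializer serializes
// on one distinguished lock, so no lock-order cycle can form.
std::mutex class_init_lock;  // stands in for the Class of java.lang.Class

void InitializeClassHierarchy(/* Class* klass */) {
  std::lock_guard<std::mutex> lock(class_init_lock);
  // ... initialize superclasses, then the class itself ...
}
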
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 6ac9cf7..f3db41f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -52,7 +52,7 @@ struct InlineIGetIPutData;
class OatWriter;
class ParallelCompilationManager;
class ScopedObjectAccess;
-template<class T> class SirtRef;
+template<class T> class Handle;
class TimingLogger;
class VerificationResults;
class VerifiedMethod;
@@ -221,15 +221,15 @@ class CompilerDriver {
// Resolve compiling method's class. Returns nullptr on failure.
mirror::Class* ResolveCompilingMethodsClass(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolve a field. Returns nullptr on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
mirror::ArtField* ResolveField(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -258,8 +258,8 @@ class CompilerDriver {
// Resolve a method. Returns nullptr on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -277,8 +277,8 @@ class CompilerDriver {
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
int IsFastInvoke(
- ScopedObjectAccess& soa, const SirtRef<mirror::DexCache>& dex_cache,
- const SirtRef<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
+ ScopedObjectAccess& soa, const Handle<mirror::DexCache>& dex_cache,
+ const Handle<mirror::ClassLoader>& class_loader, const DexCompilationUnit* mUnit,
mirror::Class* referrer_class, mirror::ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 86034c8..113594a 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -30,7 +30,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -80,7 +80,9 @@ class CompilerDriverTest : public CommonCompilerTest {
const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != NULL);
for (size_t i = 0; i < c->NumDirectMethods(); i++) {
@@ -150,7 +152,8 @@ TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> null_loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
CompileVirtualMethod(null_loader, "java.lang.Class", "isFinalizable", "()Z");
CompileDirectMethod(null_loader, "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
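
One idiom introduced above deserves a note: `hs.NewHandle<mirror::ClassLoader>(nullptr)` spells the type out because `T` cannot be deduced from a bare `nullptr` (its type is `std::nullptr_t`, not `T*`), which is also why the result is bound with `auto`. A standalone illustration with a toy `NewHandle` and hypothetical types:

#include <cstddef>

template <typename T>
struct Handle { T* ptr; };

// Toy factory with the same deduction behavior as StackHandleScope::NewHandle.
template <typename T>
Handle<T> NewHandle(T* object) { return Handle<T>{object}; }

struct ClassLoader {};

int main() {
  ClassLoader cl;
  auto h1 = NewHandle(&cl);                   // T deduced as ClassLoader
  // auto h2 = NewHandle(nullptr);            // error: cannot deduce T from nullptr_t
  auto h3 = NewHandle<ClassLoader>(nullptr);  // OK: T supplied explicitly
  (void)h1; (void)h3;
  return 0;
}
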
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index f688103..eb9b230 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -361,8 +361,9 @@ void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>&
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(dex_file)));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
method = linker->ResolveMethod(dex_file, method_idx, dex_cache, class_loader, NULL, invoke_type);
CHECK(method != NULL);
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 62817e7..d855eee 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -51,7 +51,7 @@
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "UniquePtr.h"
#include "utils.h"
@@ -382,16 +382,14 @@ void ImageWriter::CalculateObjectOffsets(Object* obj) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- Thread* self = Thread::Current();
- SirtRef<Object> sirt_obj(self, obj);
- mirror::String* interned = obj->AsString()->Intern();
- if (sirt_obj.get() != interned) {
+ mirror::String* const interned = obj->AsString()->Intern();
+ if (obj != interned) {
if (!IsImageOffsetAssigned(interned)) {
// interned obj is after us, allocate its location early
AssignImageOffset(interned);
}
// point those looking for this object to the interned version.
- SetImageOffset(sirt_obj.get(), GetImageOffset(interned));
+ SetImageOffset(obj, GetImageOffset(interned));
return;
}
// else (obj == interned), nothing to do but fall through to the normal case
@@ -404,20 +402,22 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
- SirtRef<Class> object_array_class(self, class_linker->FindSystemClass(self,
- "[Ljava/lang/Object;"));
+ StackHandleScope<3> hs(self);
+ Handle<Class> object_array_class(hs.NewHandle(
+ class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
// build an Object[] of all the DexCaches used in the source_space_
- ObjectArray<Object>* dex_caches = ObjectArray<Object>::Alloc(self, object_array_class.get(),
- class_linker->GetDexCaches().size());
+ Handle<ObjectArray<Object>> dex_caches(
+ hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(),
+ class_linker->GetDexCaches().size())));
int i = 0;
for (DexCache* dex_cache : class_linker->GetDexCaches()) {
dex_caches->Set<false>(i++, dex_cache);
}
// build an Object[] of the roots needed to restore the runtime
- SirtRef<ObjectArray<Object> > image_roots(
- self, ObjectArray<Object>::Alloc(self, object_array_class.get(), ImageHeader::kImageRootsMax));
+ Handle<ObjectArray<Object> > image_roots(hs.NewHandle(
+ ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
image_roots->Set<false>(ImageHeader::kResolutionMethod, runtime->GetResolutionMethod());
image_roots->Set<false>(ImageHeader::kImtConflictMethod, runtime->GetImtConflictMethod());
image_roots->Set<false>(ImageHeader::kDefaultImt, runtime->GetDefaultImt());
@@ -427,27 +427,28 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
runtime->GetCalleeSaveMethod(Runtime::kRefsOnly));
image_roots->Set<false>(ImageHeader::kRefsAndArgsSaveMethod,
runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
- image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches);
+ image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
CHECK(image_roots->Get(i) != NULL);
}
- return image_roots.get();
+ return image_roots.Get();
}
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
// Visit fields of parent classes first.
- SirtRef<mirror::Class> sirt_class(Thread::Current(), klass);
- mirror::Class* super = sirt_class->GetSuperClass();
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ mirror::Class* super = h_class->GetSuperClass();
if (super != nullptr) {
WalkInstanceFields(obj, super);
}
//
- size_t num_reference_fields = sirt_class->NumReferenceInstanceFields();
+ size_t num_reference_fields = h_class->NumReferenceInstanceFields();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = sirt_class->GetInstanceField(i);
+ mirror::ArtField* field = h_class->GetInstanceField(i);
MemberOffset field_offset = field->GetOffset();
mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
@@ -460,28 +461,28 @@ void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
if (!IsImageOffsetAssigned(obj)) {
// Walk instance fields of all objects
- Thread* self = Thread::Current();
- SirtRef<mirror::Object> sirt_obj(self, obj);
- SirtRef<mirror::Class> klass(self, obj->GetClass());
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::Object> h_obj(hs.NewHandle(obj));
+ Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
// visit the object itself.
- CalculateObjectOffsets(sirt_obj.get());
- WalkInstanceFields(sirt_obj.get(), klass.get());
+ CalculateObjectOffsets(h_obj.Get());
+ WalkInstanceFields(h_obj.Get(), klass.Get());
// Walk static fields of a Class.
- if (sirt_obj->IsClass()) {
+ if (h_obj->IsClass()) {
size_t num_static_fields = klass->NumReferenceStaticFields();
for (size_t i = 0; i < num_static_fields; ++i) {
mirror::ArtField* field = klass->GetStaticField(i);
MemberOffset field_offset = field->GetOffset();
- mirror::Object* value = sirt_obj->GetFieldObject<mirror::Object>(field_offset);
+ mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
if (value != nullptr) {
WalkFieldsInOrder(value);
}
}
- } else if (sirt_obj->IsObjectArray()) {
+ } else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
- int32_t length = sirt_obj->AsObjectArray<mirror::Object>()->GetLength();
+ int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
for (int32_t i = 0; i < length; i++) {
- mirror::ObjectArray<mirror::Object>* obj_array = sirt_obj->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
mirror::Object* value = obj_array->Get(i);
if (value != nullptr) {
WalkFieldsInOrder(value);
@@ -500,7 +501,8 @@ void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
Thread* self = Thread::Current();
- SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
+ StackHandleScope<1> hs(self);
+ Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK_EQ(0U, image_end_);
@@ -533,7 +535,7 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
static_cast<uint32_t>(image_end_),
RoundUp(image_end_, kPageSize),
RoundUp(bitmap_bytes, kPageSize),
- PointerToLowMemUInt32(GetImageAddress(image_roots.get())),
+ PointerToLowMemUInt32(GetImageAddress(image_roots.Get())),
oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin),
PointerToLowMemUInt32(oat_data_begin_),
@@ -691,9 +693,10 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(*patch->GetTargetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(class_linker->FindDexCache(*patch->GetTargetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = class_linker->ResolveMethod(*patch->GetTargetDexFile(),
patch->GetTargetMethodIdx(),
dex_cache,
@@ -714,9 +717,9 @@ static ArtMethod* GetTargetMethod(const CompilerDriver::CallPatchInformation* pa
static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- SirtRef<mirror::DexCache> dex_cache(self, class_linker->FindDexCache(patch->GetDexFile()));
- SirtRef<mirror::ClassLoader> class_loader(self, nullptr);
+ StackHandleScope<2> hs(Thread::Current());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
+ auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
Class* klass = class_linker->ResolveType(patch->GetDexFile(),
patch->GetTargetTypeIdx(),
dex_cache,
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 6b5e55e..6035689 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -48,7 +48,9 @@ class JniCompilerTest : public CommonCompilerTest {
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
- SirtRef<mirror::ClassLoader> loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(class_loader));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
// Compile the native method before starting the runtime
mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
mirror::ArtMethod* method;
@@ -153,8 +155,9 @@ TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
@@ -169,8 +172,9 @@ TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
ScopedObjectAccess soa(Thread::Current());
std::string reason;
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(),
- soa.Decode<mirror::ClassLoader*>(class_loader_));
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
ASSERT_TRUE(
Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 0c14346..d2f54f8 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -98,7 +98,7 @@ CompiledMethod* JniCompiler::Compile() {
arg_begin = arg_iter;
// Count the number of Object* arguments
- uint32_t sirt_size = 1;
+ uint32_t handle_scope_size = 1;
// "this" object pointer for non-static
// "class" object pointer for static
for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
@@ -106,12 +106,12 @@ CompiledMethod* JniCompiler::Compile() {
arg_iter->setName(StringPrintf("a%u", i));
#endif
if (arg_iter->getType() == irb_.getJObjectTy()) {
- ++sirt_size;
+ ++handle_scope_size;
}
}
// Shadow stack
- ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(sirt_size);
+ ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(handle_scope_size);
::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
// Store the dex pc
@@ -123,7 +123,7 @@ CompiledMethod* JniCompiler::Compile() {
// Push the shadow frame
::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
::llvm::Value* old_shadow_frame =
- irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, sirt_size);
+ irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, handle_scope_size);
// Get JNIEnv
::llvm::Value* jni_env_object_addr =
@@ -148,35 +148,35 @@ CompiledMethod* JniCompiler::Compile() {
// Variables for GetElementPtr
::llvm::Value* gep_index[] = {
irb_.getInt32(0), // No displacement for shadow frame pointer
- irb_.getInt32(1), // SIRT
+ irb_.getInt32(1), // handle scope
NULL,
};
- size_t sirt_member_index = 0;
+ size_t handle_scope_member_index = 0;
- // Store the "this object or class object" to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the "this object or class object" to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(this_object_or_class_object, sirt_field_addr, kTBAAShadowFrame);
+ irb_.CreateStore(this_object_or_class_object, handle_scope_field_addr, kTBAAShadowFrame);
// Push the "this object or class object" to out args
- this_object_or_class_object = irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy());
+ this_object_or_class_object = irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy());
args.push_back(this_object_or_class_object);
- // Store arguments to SIRT, and push back to args
+ // Store arguments to handle scope, and push back to args
for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
if (arg_iter->getType() == irb_.getJObjectTy()) {
- // Store the reference type arguments to SIRT
- gep_index[2] = irb_.getInt32(sirt_member_index++);
- ::llvm::Value* sirt_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
+ // Store the reference type arguments to handle scope
+ gep_index[2] = irb_.getInt32(handle_scope_member_index++);
+ ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(arg_iter, sirt_field_addr, kTBAAShadowFrame);
- // Note null is placed in the SIRT but the jobject passed to the native code must be null
- // (not a pointer into the SIRT as with regular references).
+ irb_.CreateStore(arg_iter, handle_scope_field_addr, kTBAAShadowFrame);
+ // Note null is placed in the handle scope but the jobject passed to the native code must be null
+ // (not a pointer into the handle scope as with regular references).
::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
::llvm::Value* arg =
irb_.CreateSelect(equal_null,
irb_.getJNull(),
- irb_.CreateBitCast(sirt_field_addr, irb_.getJObjectTy()));
+ irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy()));
args.push_back(arg);
} else {
args.push_back(arg_iter);
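
The select emitted at the end of that hunk encodes a JNI contract worth stating plainly: reference arguments are spilled into handle-scope slots and the callee receives the address of the slot as its `jobject`, but a null reference must be passed as a null `jobject`, never as the address of a slot that happens to contain null. The equivalent logic in plain C++ (hypothetical helper, same semantics as the CreateICmpEQ/CreateSelect pair above):

#include <cstddef>

using jobject = void*;
struct Object {};

// Spill one reference argument into its handle-scope slot and produce the
// jobject actually passed to native code.
jobject PassReferenceArg(Object* arg, Object** scope_slot) {
  *scope_slot = arg;  // the GC finds the reference through the scope
  return (arg == nullptr)
             ? nullptr                                 // null stays null
             : reinterpret_cast<jobject>(scope_slot);  // else: slot address
}
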
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index ae18d2e..649a80f 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -144,10 +144,10 @@ ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t ArmJniCallingConvention::OutArgSize() {
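
To make the arithmetic in `FrameSize()` concrete: on 32-bit ARM with, say, 5 callee-save registers and 2 reference arguments, the frame holds (3 + 5) * 4 bytes of fixed data, a handle scope of 2 header words plus 2 reference slots, and the return-value spill, rounded to the 16-byte stack alignment. A worked sketch; the register and reference counts are illustrative assumptions, not values from the patch:

#include <cstddef>
#include <cstdio>

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

int main() {
  constexpr size_t kFramePointerSize = 4;   // 32-bit ARM
  constexpr size_t kStackAlignment   = 16;
  constexpr size_t callee_saves      = 5;   // illustrative assumption
  constexpr size_t reference_count   = 2;   // illustrative assumption
  constexpr size_t return_value_size = 4;   // e.g. a jint result

  // Method*, LR, local reference segment state, plus callee saves.
  constexpr size_t frame_data = (3 + callee_saves) * kFramePointerSize;       // 32
  // Handle scope: 2 header words (link, number_of_references) + the slots.
  constexpr size_t handle_scope = (2 + reference_count) * kFramePointerSize;  // 16
  constexpr size_t frame = RoundUp(frame_data + handle_scope + return_value_size,
                                   kStackAlignment);                          // 64
  std::printf("frame size = %zu bytes\n", frame);
  return 0;
}
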
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 6212a23..ffd27ee 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -197,10 +197,10 @@ ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
size_t Arm64JniCallingConvention::FrameSize() {
// Method*, callee save area size, local reference segment state
size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t Arm64JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index a99a4c2..95c2d40 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -126,8 +126,8 @@ size_t JniCallingConvention::ReferenceCount() const {
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t references_size = sirt_pointer_size_ * ReferenceCount(); // size excluding header
- return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
+ size_t references_size = handle_scope_pointer_size_ * ReferenceCount(); // size excluding header
+ return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -219,13 +219,13 @@ bool JniCallingConvention::IsCurrentParamALong() {
}
}
-// Return position of SIRT entry holding reference at the current iterator
+// Return position of handle scope entry holding reference at the current iterator
// position
-FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
CHECK(IsCurrentParamAReference());
- CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- int result = SirtReferencesOffset().Int32Value() + itr_refs_ * sirt_pointer_size_;
- CHECK_GT(result, SirtNumRefsOffset().Int32Value());
+ CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
+ int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
+ CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
return FrameOffset(result);
}
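The two accessors renamed above pin down where references live in the frame: reference argument N sits at HandleReferencesOffset() + N * handle_scope_pointer_size_, and the saved local-reference cookie is stored immediately past the last reference slot, which is what the CHECK_GT/CHECK_LT assertions rely on. A hedged sketch of that index math (the base offset is an arbitrary example value, not ART's):

    static const int kReferencesOffset = 16;  // HandleReferencesOffset(), assumed for illustration
    static const int kRefEntrySize = 4;       // handle_scope_pointer_size_ (assumed)

    // Offset of the handle scope entry for reference argument n.
    static int HandleScopeEntryOffset(int n) {
      return kReferencesOffset + n * kRefEntrySize;
    }

    // The saved local reference cookie lives just past the last entry.
    static int SavedLocalReferenceCookieOffset(int reference_count) {
      return kReferencesOffset + reference_count * kRefEntrySize;
    }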
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 18afd58..2a6e7d9 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include <vector>
-#include "stack_indirect_reference_table.h"
+#include "handle_scope.h"
#include "thread.h"
#include "utils/managed_register.h"
@@ -73,7 +73,7 @@ class CallingConvention {
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
- sirt_pointer_size_(sizeof(StackReference<mirror::Object>)),
+ handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
@@ -197,8 +197,8 @@ class CallingConvention {
FrameOffset displacement_;
// The size of a reference.
const size_t frame_pointer_size_;
- // The size of a reference entry within the SIRT.
- const size_t sirt_pointer_size_;
+ // The size of a reference entry within the handle scope.
+ const size_t handle_scope_pointer_size_;
private:
const bool is_static_;
@@ -315,26 +315,25 @@ class JniCallingConvention : public CallingConvention {
virtual FrameOffset CurrentParamStackOffset() = 0;
// Iterator interface extension for JNI
- FrameOffset CurrentParamSirtEntryOffset();
+ FrameOffset CurrentParamHandleScopeEntryOffset();
- // Position of SIRT and interior fields
- FrameOffset SirtOffset() const {
+ // Position of handle scope and interior fields
+ FrameOffset HandleScopeOffset() const {
return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_); // above Method*
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset(frame_pointer_size_));
+ FrameOffset HandleScopeLinkOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() + HandleScope::LinkOffset(frame_pointer_size_));
}
- FrameOffset SirtNumRefsOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::NumberOfReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleScopeNumRefsOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
}
- FrameOffset SirtReferencesOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::ReferencesOffset(frame_pointer_size_));
+ FrameOffset HandleReferencesOffset() const {
+ return FrameOffset(HandleScopeOffset().Int32Value() +
+ HandleScope::ReferencesOffset(frame_pointer_size_));
}
virtual ~JniCallingConvention() {}
@@ -350,7 +349,7 @@ class JniCallingConvention : public CallingConvention {
size_t frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
- // Number of stack slots for outgoing arguments, above which the SIRT is
+ // Number of stack slots for outgoing arguments, above which the handle scope is
// located
virtual size_t NumberOfOutgoingStackArgs() = 0;
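Taken together, the renamed offset accessors describe the in-frame layout of the handle scope: it sits one pointer above the frame base (above the Method* slot), and its interior fields follow in the order link_, number_of_references_, references_[]. A layout sketch for a 32-bit target (field packing here is an assumption; ART's HandleScope::*Offset helpers are authoritative):

    #include <cstddef>
    #include <cstdint>

    // displacement_ + 0   : Method*
    // displacement_ + 4   : HandleScope::link_                   <- HandleScopeOffset()
    // displacement_ + 8   : HandleScope::number_of_references_   <- HandleScopeNumRefsOffset()
    // displacement_ + 12  : references_[0..n)                    <- HandleReferencesOffset()
    struct HandleScopeLayoutSketch {
      static const size_t kPointerSize = 4;  // assumed frame pointer size
      static size_t LinkOffset() { return 0; }
      static size_t NumberOfReferencesOffset() { return kPointerSize; }
      static size_t ReferencesOffset() { return kPointerSize + sizeof(uint32_t); }
    };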
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 5a22170..20f9f4b 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -103,54 +103,54 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
__ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
- // 2. Set up the StackIndirectReferenceTable
+ // 2. Set up the HandleScope
mr_conv->ResetIterator(FrameOffset(frame_size));
main_jni_conv->ResetIterator(FrameOffset(0));
- __ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
+ __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<8>(),
+ __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<8>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopSirtOffset<8>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<4>(),
+ __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
+ Thread::TopHandleScopeOffset<4>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopSirtOffset<4>(),
- main_jni_conv->SirtOffset(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
- // 3. Place incoming reference arguments into SIRT
+ // 3. Place incoming reference arguments into the handle scope
main_jni_conv->Next(); // Skip JNIEnv*
// 3.5. Create Class argument for static methods out of passed method
if (is_static) {
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check sirt offset is within frame
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset());
__ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
- __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister());
- main_jni_conv->Next(); // in SIRT so move to next argument
+ __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
+ main_jni_conv->Next(); // in handle scope so move to next argument
}
while (mr_conv->HasNext()) {
CHECK(main_jni_conv->HasNext());
bool ref_param = main_jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
- // References need placing in SIRT and the entry value passing
+ // References need placing in the handle scope and the entry value passing
if (ref_param) {
- // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+ // Compute handle scope entry; note null is placed in the handle scope but its boxed value
// must be NULL
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame and doesn't run into the saved segment state
- CHECK_LT(sirt_offset.Uint32Value(), frame_size);
- CHECK_NE(sirt_offset.Uint32Value(),
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame and doesn't run into the saved segment state
+ CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+ CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool input_on_stack = mr_conv->IsCurrentParamOnStack();
@@ -159,11 +159,11 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
if (input_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
__ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
- __ StoreRef(sirt_offset, in_reg);
+ __ StoreRef(handle_scope_offset, in_reg);
} else if (input_on_stack) {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
__ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
- __ CopyRef(sirt_offset, in_off,
+ __ CopyRef(handle_scope_offset, in_off,
mr_conv->InterproceduralScratchRegister());
}
}
@@ -197,20 +197,20 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
: QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
- FrameOffset locked_object_sirt_offset(0);
+ FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
// Pass object for locking.
main_jni_conv->Next(); // Skip JNIEnv.
- locked_object_sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
main_jni_conv->Next();
@@ -274,15 +274,15 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv*
- FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+ FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
if (main_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, sirt_offset,
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset,
mr_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
}
@@ -369,12 +369,12 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// Pass object for unlocking.
if (end_jni_conv->IsCurrentParamOnStack()) {
FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
- __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
end_jni_conv->InterproceduralScratchRegister(),
false);
} else {
ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
- __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+ __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
ManagedRegister::NoRegister(), false);
}
end_jni_conv->Next();
@@ -438,7 +438,7 @@ static void CopyParameter(Assembler* jni_asm,
size_t frame_size, size_t out_arg_size) {
bool input_in_reg = mr_conv->IsCurrentParamInRegister();
bool output_in_reg = jni_conv->IsCurrentParamInRegister();
- FrameOffset sirt_offset(0);
+ FrameOffset handle_scope_offset(0);
bool null_allowed = false;
bool ref_param = jni_conv->IsCurrentParamAReference();
CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
@@ -449,21 +449,21 @@ static void CopyParameter(Assembler* jni_asm,
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in SIRT and the entry address passing
+ // References need placing in handle scope and the entry address passing
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
- // Compute SIRT offset. Note null is placed in the SIRT but the jobject
- // passed to the native code must be null (not a pointer into the SIRT
+ // Compute handle scope offset. Note null is placed in the handle scope but the jobject
+ // passed to the native code must be null (not a pointer into the handle scope
// as with regular references).
- sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
- // Check SIRT offset is within frame.
- CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+ handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
+ // Check handle scope offset is within frame.
+ CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
}
if (input_in_reg && output_in_reg) {
ManagedRegister in_reg = mr_conv->CurrentParamRegister();
ManagedRegister out_reg = jni_conv->CurrentParamRegister();
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
} else {
if (!mr_conv->IsCurrentParamOnStack()) {
// regular non-straddling move
@@ -475,7 +475,7 @@ static void CopyParameter(Assembler* jni_asm,
} else if (!input_in_reg && !output_in_reg) {
FrameOffset out_off = jni_conv->CurrentParamStackOffset();
if (ref_param) {
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
FrameOffset in_off = mr_conv->CurrentParamStackOffset();
@@ -489,7 +489,7 @@ static void CopyParameter(Assembler* jni_asm,
// Check that incoming stack arguments are above the current stack frame.
CHECK_GT(in_off.Uint32Value(), frame_size);
if (ref_param) {
- __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+ __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
CHECK_EQ(param_size, jni_conv->CurrentParamSize());
@@ -502,8 +502,8 @@ static void CopyParameter(Assembler* jni_asm,
// Check outgoing argument is within frame
CHECK_LT(out_off.Uint32Value(), frame_size);
if (ref_param) {
- // TODO: recycle value in in_reg rather than reload from SIRT
- __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+ // TODO: recycle value in in_reg rather than reload from handle scope
+ __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
null_allowed);
} else {
size_t param_size = mr_conv->CurrentParamSize();
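Every CreateHandleScopeEntry call site above implements the same JNI boxing rule spelled out in the comments: a non-null reference is passed to native code as the address of its handle scope entry (an Object**), while null is passed as a plain null jobject, never as a pointer to a null slot. Modeled in C++ (a sketch of the emitted decision, not an ART API):

    // `entry` is the parameter's handle scope slot within the frame.
    static void* BoxForJni(void** entry, bool null_allowed) {
      if (null_allowed && *entry == nullptr) {
        return nullptr;  // jobject == NULL for a null reference
      }
      return entry;      // otherwise pass the Object** into the handle scope
    }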
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 8e1c0c7..0402fe6 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -148,10 +148,10 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
size_t MipsJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t MipsJniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 153f953..97b4cdf 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -125,10 +125,10 @@ uint32_t X86JniCallingConvention::CoreSpillMask() const {
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus 2 words for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86JniCallingConvention::OutArgSize() {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 52490e6..4871c87 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -140,10 +140,10 @@ uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
- // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
+ size_t handle_scope_size = HandleScope::GetAlignedHandleScopeSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
- return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
size_t X86_64JniCallingConvention::OutArgSize() {
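The x86-64 hunk makes the handle scope header explicit: a link_ pointer plus a number_of_references_ uint32_t. GetAlignedHandleScopeSizeTarget takes the pointer size as a parameter because the compiler may be cross-compiling, so the host's pointer width need not match the target's. A sketch of that target-parameterized computation (the alignment strategy is an assumption):

    #include <cstddef>
    #include <cstdint>

    static size_t AlignedHandleScopeSizeTarget(size_t target_pointer_size, size_t ref_count) {
      size_t header = target_pointer_size + sizeof(uint32_t);  // link_ + number_of_references_
      size_t refs = ref_count * 4;  // 4-byte StackReference<mirror::Object> entries (assumed)
      // Round up to the target pointer size (a power of two).
      return (header + refs + target_pointer_size - 1) & ~(target_pointer_size - 1);
    }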
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 66972cb..558ff1f 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -153,7 +153,8 @@ TEST_F(OatTest, WriteRead) {
num_virtual_methods = it.NumVirtualMethods();
}
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- SirtRef<mirror::ClassLoader> loader(soa.Self(), nullptr);
+ StackHandleScope<1> hs(soa.Self());
+ auto loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, loader);
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 39311d9..bace25c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -33,7 +33,7 @@
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "sirt_ref-inl.h"
+#include "handle_scope-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -511,8 +511,9 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
InvokeType invoke_type = it.GetMethodInvokeType(dex_file_->GetClassDef(class_def_index_));
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(*dex_file_));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file_)));
+ auto class_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
class_loader, nullptr, invoke_type);
CHECK(method != NULL);
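The SirtRef migration in these two files follows one pattern used throughout the change: declare a StackHandleScope<N> whose template argument counts the handles the scope must hold, then mint each Handle<T> with NewHandle. A toy model of that shape (not ART's implementation; in ART the slots are what the GC scans while the scope is live):

    #include <cstddef>

    template <typename T> struct Handle { T* obj; };

    template <size_t kCapacity>
    struct StackHandleScope {
      void* slots[kCapacity];  // fixed number of GC-visible reference slots
      size_t used = 0;
      template <typename T> Handle<T> NewHandle(T* ref) {
        slots[used++] = ref;   // each NewHandle consumes one declared slot
        return Handle<T>{ref};
      }
    };

    struct ClassLoader {};     // stand-in for mirror::ClassLoader

    int main() {
      StackHandleScope<1> hs;  // capacity must match the NewHandle calls
      Handle<ClassLoader> loader = hs.NewHandle<ClassLoader>(nullptr);
      (void)loader;
      return 0;
    }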
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 5c839dd..64685c1 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1752,53 +1752,53 @@ void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
#endif
}
-void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
}
-void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
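On ARM the register variant above compiles the null check branch-free: a compare followed by two predicated instructions, roughly cmp in_reg, #0 / moveq out_reg, #0 / addne out_reg, sp, #offset. A C++ model of that logic (widths assumed 32-bit):

    #include <cstdint>

    // out_reg = (in_reg == 0) ? 0 : (SP + handle_scope_offset)
    static uint32_t CreateEntryArm(uint32_t sp, uint32_t handle_scope_offset, uint32_t in_reg) {
      return (in_reg == 0) ? 0u : (sp + handle_scope_offset);
    }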
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f5be04a..396e603 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -521,20 +521,20 @@ class ArmAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f486b3c..27188b2 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -539,52 +539,52 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
+void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
ManagedRegister m_in_reg, bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale sirt entries in x registers.
+ // For now we only hold stale handle scope entries in x registers.
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
- sirt_offs.Int32Value());
+ handle_scope_offs.Int32Value());
in_reg = out_reg;
}
___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
}
}
-void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
// Move this logic in add constants with flags.
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
+void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
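Note the width split in the arm64 variant: the reference is loaded and compared through the overlapping W (32-bit) view of the register, since handle scope entries are 4-byte StackReference<mirror::Object> slots even on a 64-bit target, while the resulting entry address is a full 64-bit value. A C++ model of that asymmetry (the widths are the point; names are illustrative):

    #include <cstdint>

    // LoadWFromOffset reads a 4-byte slot; AddConstant produces a 64-bit address.
    static uint64_t CreateEntryArm64(uint64_t sp, uint32_t handle_scope_offset, uint32_t ref_slot) {
      return (ref_slot == 0) ? 0u : (sp + handle_scope_offset);
    }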
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 583150c..c866b29 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -161,20 +161,20 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst.
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 219c87f..19239e1 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -453,20 +453,20 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 9001f8a..8001dcd 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -827,8 +827,8 @@ void MipsAssembler::MemoryBarrier(ManagedRegister) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
-void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
@@ -836,27 +836,27 @@ void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
Label null_arg;
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0);
}
EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
-void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -864,21 +864,21 @@ void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
if (null_allowed) {
Label null_arg;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-// Given a SIRT entry, load the associated reference.
-void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 75ee8b9..216cb41 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,20 +238,20 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister mscratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 6a3efc5..0791c63 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1727,8 +1727,8 @@ void X86Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
@@ -1742,34 +1742,34 @@ void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
}
-void X86Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
Store(out_off, scratch, 4);
}
-// Given a SIRT entry, load the associated reference.
-void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
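The x86 code (like the MIPS version above) uses a test plus a forward branch over the lea rather than predication: when the loaded reference is zero the lea is skipped and the register keeps the zero. A C++ model of the scratch-register variant's control flow:

    #include <cstdint>

    static uint32_t CreateEntryX86(uint32_t esp, uint32_t handle_scope_offset, uint32_t loaded_ref) {
      uint32_t result = loaded_ref;          // movl scratch, [ESP + handle_scope_offset]
      if (result != 0) {                     // testl scratch, scratch; j(kZero, &null_arg)
        result = esp + handle_scope_offset;  // leal scratch, [ESP + handle_scope_offset]
      }
      return result;                         // Bind(&null_arg); caller then stores to out_off
    }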
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 057c80a..2fc6049 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -541,20 +541,20 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 8eaeae1..0ede875 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1989,8 +1989,8 @@ void X86_64Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
@@ -1998,7 +1998,7 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
// Use out_reg as indicator of NULL
in_reg = out_reg;
// TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
CHECK(in_reg.IsCpuRegister());
CHECK(out_reg.IsCpuRegister());
@@ -2010,34 +2010,34 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
}
-void X86_64Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
Store(out_off, scratch, 8);
}
-// Given a SIRT entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 87fb359..548d379 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -566,20 +566,20 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src);
// Heap::VerifyObject on src. In some cases (such as a reference to this) we