summary refs log tree commit diff stats
path: root/runtime/thread.cc
diff options
context:
space:
mode:
authorMathieu Chartier <mathieuc@google.com>2014-05-19 10:52:16 -0700
committerMathieu Chartier <mathieuc@google.com>2014-05-19 15:19:28 -0700
commit2b7c4d196c8abe32f4ca633534917da9de53c359 (patch)
treef14dc45f53d5681a5c0e57b8a8cc6b35eee6e896 /runtime/thread.cc
parent84e524207b23d58a1b1e5f4443000ccac97c4184 (diff)
downloadart-2b7c4d196c8abe32f4ca633534917da9de53c359.zip
art-2b7c4d196c8abe32f4ca633534917da9de53c359.tar.gz
art-2b7c4d196c8abe32f4ca633534917da9de53c359.tar.bz2
Don't get and restore thread state for ScopedFastNativeObjectAccess.
Before, we would ensure that we were runnable for fast native object access. However, these accesses are done when you are already runnable.

Change-Id: Ia4c6e4c83d146fe2a988b37b3133ca46b0f0fa42
Diffstat (limited to 'runtime/thread.cc')
-rw-r--r--  runtime/thread.cc  20
1 file changed, 12 insertions, 8 deletions
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 488961e..8c057e3 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -171,7 +171,7 @@ void* Thread::CreateCallback(void* arg) {
return nullptr;
}
-Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
+Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* thread_peer) {
mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
@@ -186,7 +186,8 @@ Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
return result;
}
-Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
+Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
+ jobject java_thread) {
return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
}
@@ -556,7 +557,7 @@ void Thread::Dump(std::ostream& os) const {
DumpStack(os);
}
-mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
+mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
}
@@ -1432,7 +1433,7 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
};
template<bool kTransactionActive>
-jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
+jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
// Compute depth of stack
CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
count_visitor.WalkStack();
@@ -1455,11 +1456,14 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa)
}
return soa.AddLocalReference<jobjectArray>(trace);
}
-template jobject Thread::CreateInternalStackTrace<false>(const ScopedObjectAccessUnchecked& soa) const;
-template jobject Thread::CreateInternalStackTrace<true>(const ScopedObjectAccessUnchecked& soa) const;
+template jobject Thread::CreateInternalStackTrace<false>(
+ const ScopedObjectAccessAlreadyRunnable& soa) const;
+template jobject Thread::CreateInternalStackTrace<true>(
+ const ScopedObjectAccessAlreadyRunnable& soa) const;
-jobjectArray Thread::InternalStackTraceToStackTraceElementArray(const ScopedObjectAccess& soa,
- jobject internal, jobjectArray output_array, int* stack_depth) {
+jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
+ const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
+ int* stack_depth) {
// Decode the internal stack trace into the depth, method trace and PC trace
int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;