author    Ian Rogers <irogers@google.com>    2011-07-21 22:00:15 -0700
committer Ian Rogers <irogers@google.com>    2011-07-22 15:16:43 -0700
commit    45a76cb99104a222d6a9bd768a084893dcb7cf30 (patch)
tree      995557ba95942309ffe9c4a13819a10da8c4b4bb /src
parent    69759eaa6fd4386f1e6d8748052ad221087b3476 (diff)
Exception and suspend count polling on JNI bridge return.
Change-Id: I0e5597fcbdcdb88100b18d63323e7ba8d27f13fe
Diffstat (limited to 'src')
-rw-r--r--  src/assembler.cc               |  1
-rw-r--r--  src/assembler.h                | 72
-rw-r--r--  src/assembler_arm.cc           | 19
-rw-r--r--  src/assembler_arm.h            | 13
-rw-r--r--  src/assembler_x86.cc           | 90
-rw-r--r--  src/assembler_x86.h            | 14
-rw-r--r--  src/calling_convention_arm.cc  |  7
-rw-r--r--  src/calling_convention_x86.cc  |  7
-rw-r--r--  src/jni_compiler.cc            | 15
-rw-r--r--  src/jni_compiler.h             |  6
-rw-r--r--  src/jni_compiler_test.cc       | 83
-rw-r--r--  src/object.cc                  |  1
-rw-r--r--  src/object.h                   |  4
-rw-r--r--  src/thread.h                   | 54
14 files changed, 370 insertions(+), 16 deletions(-)
diff --git a/src/assembler.cc b/src/assembler.cc
index 52d424a..8bd7625 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -54,6 +54,7 @@ AssemblerBuffer::AssemblerBuffer() {
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
fixup_ = NULL;
+ slow_path_ = NULL;
#if defined(DEBUG)
has_ensured_capacity_ = false;
fixups_processed_ = false;
diff --git a/src/assembler.h b/src/assembler.h
index cc02c69..c98a6b7 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -5,7 +5,9 @@
#include "src/logging.h"
#include "src/macros.h"
+#include "src/managed_register.h"
#include "src/memory_region.h"
+#include "src/offsets.h"
namespace art {
@@ -83,6 +85,52 @@ class AssemblerFixup {
friend class AssemblerBuffer;
};
+// Parent of all queued slow paths, emitted during finalization
+class SlowPath {
+ public:
+ SlowPath() : next_(NULL) {}
+ virtual ~SlowPath() {}
+
+ Label* Continuation() { return &continuation_; }
+ Label* Entry() { return &entry_; }
+ // Generate code for slow path
+ virtual void Emit(Assembler* sp_asm) = 0;
+
+ protected:
+ // Entry branched to by fast path
+ Label entry_;
+ // Optional continuation that is branched to at the end of the slow path
+ Label continuation_;
+ // Next in linked list of slow paths
+ SlowPath* next_;
+
+ friend class AssemblerBuffer;
+ DISALLOW_COPY_AND_ASSIGN(SlowPath);
+};
+
+// Slow path entered when Thread::Current()->exception_ is non-null
+class ExceptionSlowPath : public SlowPath {
+ public:
+ ExceptionSlowPath() {}
+ virtual void Emit(Assembler* sp_asm);
+};
+
+// Slow path entered when Thread::Current()->suspend_count_ is non-zero
+class SuspendCountSlowPath : public SlowPath {
+ public:
+ SuspendCountSlowPath(ManagedRegister return_reg,
+ FrameOffset return_save_location,
+ size_t return_size) :
+ return_register_(return_reg), return_save_location_(return_save_location),
+ return_size_(return_size) {}
+ virtual void Emit(Assembler* sp_asm);
+
+ private:
+ // Remember how to save the return value
+ const ManagedRegister return_register_;
+ const FrameOffset return_save_location_;
+ const size_t return_size_;
+};
class AssemblerBuffer {
public:
@@ -113,6 +161,27 @@ class AssemblerBuffer {
fixup_ = fixup;
}
+ void EnqueueSlowPath(SlowPath* slowpath) {
+ if (slow_path_ == NULL) {
+ slow_path_ = slowpath;
+ } else {
+ SlowPath* cur = slow_path_;
+ for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ cur->next_ = slowpath;
+ }
+ }
+
+ void EmitSlowPaths(Assembler* sp_asm) {
+ SlowPath* cur = slow_path_;
+ SlowPath* next = NULL;
+ slow_path_ = NULL;
+ for ( ; cur != NULL ; cur = next) {
+ cur->Emit(sp_asm);
+ next = cur->next_;
+ delete cur;
+ }
+ }
+
// Get the size of the emitted code.
size_t Size() const {
CHECK_GE(cursor_, contents_);
@@ -203,6 +272,9 @@ class AssemblerBuffer {
AssemblerFixup* fixup_;
bool fixups_processed_;
+ // Head of linked list of slow paths
+ SlowPath* slow_path_;
+
byte* cursor() const { return cursor_; }
byte* limit() const { return limit_; }
size_t Capacity() const {
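The contract here is worth spelling out: fast-path code allocates a SlowPath subclass, hands ownership to the buffer via EnqueueSlowPath(), branches to Entry() on the rare condition, and binds Continuation() at the join point; EmitSlowPaths() later emits each queued path out of line and deletes it. A minimal sketch of a custom subclass against this interface, using the x86 Assembler from this change (the class name and the runtime entry point are illustrative, not part of the commit):

    // Hypothetical slow path: call a Thread-relative runtime routine out of
    // line, then rejoin the fast path.
    class CallRuntimeSlowPath : public SlowPath {
     public:
      explicit CallRuntimeSlowPath(ThreadOffset entry_point)
          : entry_point_(entry_point) {}
      virtual void Emit(Assembler* sp_asm) {
        sp_asm->Bind(&entry_);        // target of the fast-path branch
        sp_asm->fs();                 // Thread-relative segment override
        sp_asm->call(Address::Absolute(entry_point_));
        sp_asm->jmp(&continuation_);  // rejoin at the bound continuation
      }
     private:
      const ThreadOffset entry_point_;
    };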
diff --git a/src/assembler_arm.cc b/src/assembler_arm.cc
index 603be68..d470b90 100644
--- a/src/assembler_arm.cc
+++ b/src/assembler_arm.cc
@@ -1412,6 +1412,10 @@ void Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
TR, thr_offs.Int32Value());
}
+void Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
void Assembler::Move(ManagedRegister dest, ManagedRegister src) {
if (dest.IsCoreRegister()) {
CHECK(src.IsCoreRegister());
@@ -1504,4 +1508,19 @@ void Assembler::Call(ManagedRegister base, Offset offset,
// TODO: place reference map on call
}
+// Generate code to check if Thread::Current()->suspend_count_ is non-zero
+// and branch to a SuspendCountSlowPath if it is. The slow path will continue
+// at the next instruction.
+void Assembler::SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+ FrameOffset return_save_location,
+ size_t return_size) {
+ LOG(WARNING) << "Unimplemented: Suspend poll";
+}
+
+// Generate code to check if Thread::Current()->exception_ is non-null
+// and branch to an ExceptionSlowPath if it is.
+void Assembler::ExceptionPoll(ManagedRegister scratch) {
+ LOG(WARNING) << "Unimplemented: Exception poll";
+}
+
} // namespace art
diff --git a/src/assembler_arm.h b/src/assembler_arm.h
index 3981f4a..acaf870 100644
--- a/src/assembler_arm.h
+++ b/src/assembler_arm.h
@@ -446,6 +446,7 @@ class Assembler {
void StoreStackOffsetToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
ManagedRegister scratch);
+ void StoreStackPointerToThread(ThreadOffset thr_offs);
void Move(ManagedRegister dest, ManagedRegister src);
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
@@ -464,12 +465,24 @@ class Assembler {
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
+ // Generate code to check if Thread::Current()->suspend_count_ is non-zero
+ // and branch to a SuspendCountSlowPath if it is. The slow path will continue
+ // at the next instruction.
+ void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+ FrameOffset return_save_location, size_t return_size);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch);
+
// Emit data (e.g. encoded instruction or immediate) to the
// instruction stream.
void Emit(int32_t value);
void Bind(Label* label);
+ void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }
+
size_t CodeSize() const { return buffer_.Size(); }
void FinalizeInstructions(const MemoryRegion& region) {
diff --git a/src/assembler_x86.cc b/src/assembler_x86.cc
index 2b21463..a3d47e3 100644
--- a/src/assembler_x86.cc
+++ b/src/assembler_x86.cc
@@ -5,6 +5,7 @@
#include "src/globals.h"
#include "src/memory_region.h"
#include "src/offsets.h"
+#include "src/thread.h"
namespace art {
@@ -1396,10 +1397,19 @@ void Assembler::DecreaseFrameSize(size_t adjust) {
// Store bytes from the given register onto the stack
void Assembler::Store(FrameOffset offs, ManagedRegister src, size_t size) {
- if (src.IsCpuRegister()) {
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
CHECK_EQ(4u, size);
movl(Address(ESP, offs), src.AsCpuRegister());
- } else if (src.IsXmmRegister()) {
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ fstps(Address(ESP, offs));
+ } else {
+ fstpl(Address(ESP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
if (size == 4) {
movss(Address(ESP, offs), src.AsXmmRegister());
} else {
@@ -1437,12 +1447,24 @@ void Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
}
void Assembler::Load(ManagedRegister dest, FrameOffset src, size_t size) {
- if (dest.IsCpuRegister()) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
CHECK_EQ(4u, size);
movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ flds(Address(ESP, src));
+ } else {
+ fldl(Address(ESP, src));
+ }
} else {
- // TODO: x87, SSE
- LOG(FATAL) << "Unimplemented";
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ movss(dest.AsXmmRegister(), Address(ESP, src));
+ } else {
+ movsd(dest.AsXmmRegister(), Address(ESP, src));
+ }
}
}
@@ -1488,6 +1510,11 @@ void Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}
+void Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ fs();
+ movl(Address::Absolute(thr_offs), ESP);
+}
+
void Assembler::Move(ManagedRegister dest, ManagedRegister src) {
if (!dest.Equals(src)) {
if (dest.IsCpuRegister() && src.IsCpuRegister()) {
@@ -1579,4 +1606,57 @@ void Assembler::Call(ManagedRegister base, Offset offset,
// TODO: place reference map on call
}
+// Generate code to check if Thread::Current()->suspend_count_ is non-zero
+// and branch to a SuspendCountSlowPath if it is. The slow path will continue
+// at the next instruction.
+void Assembler::SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+ FrameOffset return_save_location,
+ size_t return_size) {
+ SuspendCountSlowPath* slow =
+ new SuspendCountSlowPath(return_reg, return_save_location, return_size);
+ buffer_.EnqueueSlowPath(slow);
+ fs();
+ cmpl(Address::Absolute(Thread::SuspendCountOffset()), Immediate(0));
+ j(NOT_EQUAL, slow->Entry());
+ Bind(slow->Continuation());
+}
+void SuspendCountSlowPath::Emit(Assembler* sp_asm) {
+ sp_asm->Bind(&entry_);
+ // Save return value
+ sp_asm->Store(return_save_location_, return_register_, return_size_);
+ // Pass top of stack as argument
+ sp_asm->pushl(ESP);
+ sp_asm->fs();
+ sp_asm->call(Address::Absolute(Thread::SuspendCountEntryPointOffset()));
+ // Release argument
+ sp_asm->addl(ESP, Immediate(kPointerSize));
+ // Reload return value
+ sp_asm->Load(return_register_, return_save_location_, return_size_);
+ sp_asm->jmp(&continuation_);
+}
+
+// Generate code to check if Thread::Current()->exception_ is non-null
+// and branch to an ExceptionSlowPath if it is.
+void Assembler::ExceptionPoll(ManagedRegister scratch) {
+ ExceptionSlowPath* slow = new ExceptionSlowPath();
+ buffer_.EnqueueSlowPath(slow);
+ fs();
+ cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
+ j(NOT_EQUAL, slow->Entry());
+ Bind(slow->Continuation());
+}
+void ExceptionSlowPath::Emit(Assembler* sp_asm) {
+ sp_asm->Bind(&entry_);
+ // NB the return value is dead
+ // Pass top of stack as argument
+ sp_asm->pushl(ESP);
+ sp_asm->fs();
+ sp_asm->call(Address::Absolute(Thread::ExceptionEntryPointOffset()));
+ // TODO: this call should never return as it should make a long jump to
+ // the appropriate catch block
+ // Release argument
+ sp_asm->addl(ESP, Immediate(kPointerSize));
+ sp_asm->jmp(&continuation_);
+}
+
} // namespace art
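For reference, the instruction sequence that ExceptionPoll() and ExceptionSlowPath::Emit() cooperate to produce looks roughly like the following (a sketch with illustrative label names, not literal assembler output):

        fs cmpl $0, [Thread::ExceptionOffset()]   ; exception_ == NULL?
        jne  exception_entry                      ; rare: pending exception
    continuation:
        ...                                       ; normal return path continues

    ; appended after the method body by EmitSlowPaths():
    exception_entry:
        pushl %esp                                ; pass Method** (top of stack)
        fs call [Thread::ExceptionEntryPointOffset()]
        addl $4, %esp                             ; release the argument
        jmp  continuation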
diff --git a/src/assembler_x86.h b/src/assembler_x86.h
index a32191f..e446c51 100644
--- a/src/assembler_x86.h
+++ b/src/assembler_x86.h
@@ -445,6 +445,8 @@ class Assembler {
void StoreStackOffsetToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
ManagedRegister scratch);
+ void StoreStackPointerToThread(ThreadOffset thr_offs);
+
void Move(ManagedRegister dest, ManagedRegister src);
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
@@ -463,6 +465,16 @@ class Assembler {
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
+ // Generate code to check if Thread::Current()->suspend_count_ is non-zero
+ // and branch to a SuspendCountSlowPath if it is. The slow path will continue
+ // at the next instruction.
+ void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+ FrameOffset return_save_location, size_t return_size);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch);
+
void AddImmediate(Register reg, const Immediate& imm);
void LoadDoubleConstant(XmmRegister dst, double value);
@@ -484,6 +496,8 @@ class Assembler {
void Align(int alignment, int offset);
void Bind(Label* label);
+ void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }
+
size_t CodeSize() const { return buffer_.Size(); }
void FinalizeInstructions(const MemoryRegion& region) {
diff --git a/src/calling_convention_arm.cc b/src/calling_convention_arm.cc
index 233905a..bac943b 100644
--- a/src/calling_convention_arm.cc
+++ b/src/calling_convention_arm.cc
@@ -15,12 +15,15 @@ ManagedRegister CallingConvention::InterproceduralScratchRegister() {
}
ManagedRegister CallingConvention::ReturnRegister() {
+ const Method* method = GetMethod();
if (GetMethod()->IsReturnAFloat()) {
return ManagedRegister::FromSRegister(S0);
- } else if (GetMethod()->IsReturnAFloat()) {
+ } else if (method->IsReturnADouble()) {
return ManagedRegister::FromDRegister(D0);
- } else if (GetMethod()->IsReturnALong()) {
+ } else if (method->IsReturnALong()) {
return ManagedRegister::FromRegisterPair(R0_R1);
+ } else if (method->IsReturnVoid()) {
+ return ManagedRegister::NoRegister();
} else {
return ManagedRegister::FromCoreRegister(R0);
}
diff --git a/src/calling_convention_x86.cc b/src/calling_convention_x86.cc
index a72f361..f1b96a6 100644
--- a/src/calling_convention_x86.cc
+++ b/src/calling_convention_x86.cc
@@ -16,10 +16,13 @@ ManagedRegister CallingConvention::InterproceduralScratchRegister() {
}
ManagedRegister CallingConvention::ReturnRegister() {
- if (GetMethod()->IsReturnAFloatOrDouble()) {
+ const Method* method = GetMethod();
+ if (method->IsReturnAFloatOrDouble()) {
return ManagedRegister::FromX87Register(ST0);
- } else if (GetMethod()->IsReturnALong()) {
+ } else if (method->IsReturnALong()) {
return ManagedRegister::FromRegisterPair(EAX_EDX);
+ } else if (method->IsReturnVoid()) {
+ return ManagedRegister::NoRegister();
} else {
return ManagedRegister::FromCpuRegister(EAX);
}
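Worked examples of the x86 selection above, keyed off the first character of the method shorty (the return type):

    // shorty "V..."          -> ManagedRegister::NoRegister()  (void: no result)
    // shorty "F..." / "D..." -> FromX87Register(ST0)           (x87 stack top)
    // shorty "J..."          -> FromRegisterPair(EAX_EDX)      (64-bit long)
    // shorty "I...", "L...", "[...", etc. -> FromCpuRegister(EAX)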
diff --git a/src/jni_compiler.cc b/src/jni_compiler.cc
index 5385a8b..a6f5faf 100644
--- a/src/jni_compiler.cc
+++ b/src/jni_compiler.cc
@@ -89,6 +89,7 @@ void JniCompiler::Compile(Assembler* jni_asm, Method* native_method) {
// 5. Transition from being in managed to native code
// TODO: write out anchor, ensure the transition to native follow a store
// fence.
+ jni_asm->StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kNative,
mr_conv.InterproceduralScratchRegister());
@@ -230,9 +231,18 @@ void JniCompiler::Compile(Assembler* jni_asm, Method* native_method) {
// 12. Transition from being in native to managed code, possibly entering a
// safepoint
+ CHECK(!jni_conv.InterproceduralScratchRegister()
+ .Equals(jni_conv.ReturnRegister())); // don't clobber result
+ // Location to preserve result on slow path, ensuring it's within the frame
+ FrameOffset return_save_location = jni_conv.ReturnValueSaveLocation();
+ CHECK_LT(return_save_location.Uint32Value(), frame_size);
+ jni_asm->SuspendPoll(jni_conv.InterproceduralScratchRegister(),
+ jni_conv.ReturnRegister(), return_save_location,
+ jni_conv.SizeOfReturnValue());
+ jni_asm->ExceptionPoll(jni_conv.InterproceduralScratchRegister());
jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable,
- mr_conv.InterproceduralScratchRegister());
- // TODO: check for safepoint transition
+ jni_conv.InterproceduralScratchRegister());
+
// 15. Place result in correct register possibly dehandlerizing
if (jni_conv.IsReturnAReference()) {
@@ -250,6 +260,7 @@ void JniCompiler::Compile(Assembler* jni_asm, Method* native_method) {
jni_asm->RemoveFrame(frame_size);
// 18. Finalize code generation
+ jni_asm->EmitSlowPaths();
size_t cs = jni_asm->CodeSize();
MemoryRegion code(AllocateCode(cs), cs);
jni_asm->FinalizeInstructions(code);
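Taken together, the return half of the generated bridge now proceeds in this order (a summary of the numbered steps above, not literal output):

    // 12a. SuspendPoll: if suspend_count_ != 0, spill the native return value
    //      to return_save_location, call suspend_count_entry_point_(Method**),
    //      then reload the return value
    // 12b. ExceptionPoll: if exception_ != NULL, call
    //      exception_entry_point_(Method**)
    // 12c. store Thread::kRunnable into Thread::StateOffset()
    //      (native -> managed transition)
    // 15.  dehandlerize a reference result if needed; move result into place
    //      and remove the frame
    // 18.  EmitSlowPaths() appends all queued slow paths after the method
    //      body, then the code is sized and finalized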
diff --git a/src/jni_compiler.h b/src/jni_compiler.h
index 48666aa..495164b 100644
--- a/src/jni_compiler.h
+++ b/src/jni_compiler.h
@@ -3,9 +3,9 @@
#ifndef ART_SRC_JNI_COMPILER_H_
#define ART_SRC_JNI_COMPILER_H_
-#include "calling_convention.h"
-#include "globals.h"
-#include "macros.h"
+#include "src/calling_convention.h"
+#include "src/globals.h"
+#include "src/macros.h"
namespace art {
diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc
index 23615ed..db8a9e7 100644
--- a/src/jni_compiler_test.cc
+++ b/src/jni_compiler_test.cc
@@ -198,7 +198,7 @@ TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
jvalue a;
a.l = (jobject)NULL;
- EXPECT_EQ(0, gJava_MyClass_foo_calls);
+ gJava_MyClass_foo_calls = 0;
RunMethod(method, a, a, a, a);
EXPECT_EQ(1, gJava_MyClass_foo_calls);
RunMethod(method, a, a, a, a);
@@ -456,4 +456,85 @@ TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
EXPECT_EQ(7, gJava_MyClass_fooSSIOO_calls);
}
+int gSuspendCounterHandler_calls;
+void SuspendCountHandler(Method** frame) {
+ EXPECT_EQ(0, (*frame)->GetName().compare("fooI"));
+ gSuspendCounterHandler_calls++;
+ Thread::Current()->DecrementSuspendCount();
+}
+TEST_F(JniCompilerTest, SuspendCountAcknowledgement) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("fooI");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooI));
+ Thread::Current()->RegisterSuspendCountEntryPoint(&SuspendCountHandler);
+
+ gSuspendCounterHandler_calls = 0;
+ gJava_MyClass_fooI_calls = 0;
+ jvalue a, b, c;
+ a.l = (jobject)NULL;
+ b.i = 42;
+ c = RunMethod(method, a, b, a, a);
+ ASSERT_EQ(42, c.i);
+ EXPECT_EQ(1, gJava_MyClass_fooI_calls);
+ EXPECT_EQ(0, gSuspendCounterHandler_calls);
+ Thread::Current()->IncrementSuspendCount();
+ c = RunMethod(method, a, b, a, a);
+ ASSERT_EQ(42, c.i);
+ EXPECT_EQ(2, gJava_MyClass_fooI_calls);
+ EXPECT_EQ(1, gSuspendCounterHandler_calls);
+ c = RunMethod(method, a, b, a, a);
+ ASSERT_EQ(42, c.i);
+ EXPECT_EQ(3, gJava_MyClass_fooI_calls);
+ EXPECT_EQ(1, gSuspendCounterHandler_calls);
+}
+
+int gExceptionHandler_calls;
+void ExceptionHandler(Method** frame) {
+ EXPECT_EQ(0, (*frame)->GetName().compare("foo"));
+ gExceptionHandler_calls++;
+ Thread::Current()->ClearException();
+}
+TEST_F(JniCompilerTest, ExceptionHandling) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("foo");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_foo));
+ Thread::Current()->RegisterExceptionEntryPoint(&ExceptionHandler);
+
+ gExceptionHandler_calls = 0;
+ gJava_MyClass_foo_calls = 0;
+ jvalue a;
+ a.l = (jobject)NULL;
+ RunMethod(method, a, a, a, a);
+ EXPECT_EQ(1, gJava_MyClass_foo_calls);
+ EXPECT_EQ(0, gExceptionHandler_calls);
+ // TODO: create a real exception here
+ Thread::Current()->SetException(reinterpret_cast<Object*>(8));
+ RunMethod(method, a, a, a, a);
+ EXPECT_EQ(2, gJava_MyClass_foo_calls);
+ EXPECT_EQ(1, gExceptionHandler_calls);
+ RunMethod(method, a, a, a, a);
+ EXPECT_EQ(3, gJava_MyClass_foo_calls);
+ EXPECT_EQ(1, gExceptionHandler_calls);
+}
+
} // namespace art
diff --git a/src/object.cc b/src/object.cc
index c8bd353..c79271d 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -129,6 +129,7 @@ bool Method::IsParamALongOrDouble(unsigned int param) const {
static size_t ShortyCharToSize(char x) {
switch (x) {
+ case 'V': return 0;
case '[': return kPointerSize;
case 'L': return kPointerSize;
case 'D': return 8;
diff --git a/src/object.h b/src/object.h
index 266822d..218aed9 100644
--- a/src/object.h
+++ b/src/object.h
@@ -486,6 +486,10 @@ class Method : public Object {
return shorty_[0] == 'J';
}
+ bool IsReturnVoid() const {
+ return shorty_[0] == 'V';
+ }
+
// The number of arguments that should be supplied to this method
size_t NumArgs() const {
return (IsStatic() ? 0 : 1) + shorty_.length() - 1;
diff --git a/src/thread.h b/src/thread.h
index 3c6e64b..fd03464 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -11,6 +11,7 @@
#include "src/jni_internal.h"
#include "src/logging.h"
#include "src/macros.h"
+#include "src/object.h"
#include "src/offsets.h"
#include "src/runtime.h"
@@ -151,6 +152,11 @@ class Thread {
exception_ = NULL;
}
+ // Offset of exception within Thread, used by generated code
+ static ThreadOffset ExceptionOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
+ }
+
void SetName(const char* name);
void Suspend();
@@ -173,6 +179,10 @@ class Thread {
state_ = new_state;
}
+ // Offset of suspend_count_ within Thread, used by generated code
+ static ThreadOffset SuspendCountOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
+ }
+
// Offset of state within Thread, used by generated code
static ThreadOffset StateOffset() {
return ThreadOffset(OFFSETOF_MEMBER(Thread, state_));
@@ -188,6 +198,11 @@ class Thread {
return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
}
+ // Offset of top of managed stack address, used by generated code
+ static ThreadOffset TopOfManagedStackOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_));
+ }
+
// Offset of top stack handle block within Thread, used by generated code
static ThreadOffset TopShbOffset() {
return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shb_));
@@ -202,9 +217,31 @@ class Thread {
return count;
}
+ // Offset of exception_entry_point_ within Thread, used by generated code
+ static ThreadOffset ExceptionEntryPointOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_entry_point_));
+ }
+
+ void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
+ exception_entry_point_ = handler;
+ }
+
+ // Offset of suspend_count_entry_point_ within Thread, used by generated code
+ static ThreadOffset SuspendCountEntryPointOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_entry_point_));
+ }
+
+ void RegisterSuspendCountEntryPoint(void (*handler)(Method**)) {
+ suspend_count_entry_point_ = handler;
+ }
+
+ // Increasing the suspend count will cause the thread to run to a safepoint
+ void IncrementSuspendCount() { suspend_count_++; }
+ void DecrementSuspendCount() { suspend_count_--; }
+
private:
Thread() :
- id_(1234), top_shb_(NULL), exception_(NULL) {
+ id_(1234), top_shb_(NULL), exception_(NULL), suspend_count_(0) {
jni_env_ = new JniEnvironment();
}
@@ -217,6 +254,11 @@ class Thread {
// Managed thread id.
uint32_t id_;
+ // Top of the managed stack, written out prior to the state transition from
+ // kRunnable to kNative. Uses include giving the starting point for scanning
+ // a managed stack when a thread is in native code.
+ void* top_of_managed_stack_;
+
// Top of linked list of stack handle blocks or NULL for none
StackHandleBlock* top_shb_;
@@ -241,6 +283,10 @@ class Thread {
// The pending exception or NULL.
Object* exception_;
+ // A non-zero value is used to tell the current thread to enter a safepoint
+ // at the next poll.
+ int suspend_count_;
+
// The inclusive base of the control stack.
byte* stack_base_;
@@ -250,6 +296,12 @@ class Thread {
// TLS key used to retrieve the VM thread object.
static pthread_key_t pthread_key_self_;
+ // Entry point called when exception_ is set
+ void (*exception_entry_point_)(Method** frame);
+
+ // Entry point called when suspend_count_ is non-zero
+ void (*suspend_count_entry_point_)(Method** frame);
+
DISALLOW_COPY_AND_ASSIGN(Thread);
};
std::ostream& operator<<(std::ostream& os, const Thread::State& state);
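One contract underlies all of these offset accessors: generated code can reach the current Thread without a spare register, via the %fs segment on x86 (hence the fs() prefixes above) and the dedicated TR register on ARM. A minimal sketch of a poll against an arbitrary Thread field, assuming the x86 interface from this change (the helper function is hypothetical):

    // Emit "fs cmpl $0, [field]" and branch to 'slow' when the field is
    // non-zero; 'field' is an offset within the current Thread.
    static void EmitThreadFieldPoll(Assembler* sp_asm, ThreadOffset field,
                                    Label* slow) {
      sp_asm->fs();                                    // %fs-relative access
      sp_asm->cmpl(Address::Absolute(field), Immediate(0));
      sp_asm->j(NOT_EQUAL, slow);                      // non-zero => slow path
    }
    // e.g. EmitThreadFieldPoll(&jni_asm, Thread::SuspendCountOffset(),
    //                          slow->Entry());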