-rw-r--r--  build/Android.gtest.mk                                     |  4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S             | 31
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 14
-rw-r--r--  runtime/mirror/art_method.cc                               |  9
-rw-r--r--  runtime/mirror/art_method.h                                |  5
-rw-r--r--  runtime/stack.cc                                           | 20
-rw-r--r--  runtime/thread.cc                                          |  4
7 files changed, 58 insertions(+), 29 deletions(-)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 19748a8..7829f9b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -173,6 +173,10 @@ define build-art-test
     # Mac OS complains about unresolved symbols if you don't include this.
     LOCAL_WHOLE_STATIC_LIBRARIES := libgtest_host
   endif
+  ifneq ($(BUILD_HOST_64bit),)
+    # 64bit Linux complains about unresolved symbols if you don't include this.
+    LOCAL_WHOLE_STATIC_LIBRARIES := libgtest_host
+  endif
   include $(BUILD_HOST_EXECUTABLE)
   art_gtest_exe := $(HOST_OUT_EXECUTABLES)/$$(LOCAL_MODULE)
   ART_HOST_GTEST_EXECUTABLES += $$(art_gtest_exe)
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index a78a1e5..81d5f4c 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -778,8 +778,8 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
     movq %gs:THREAD_SELF_OFFSET, %rdi
     movq %rbp, %rsi
     call PLT_SYMBOL(artQuickGenericJniTrampoline)  // (Thread*, sp)
-    test %rax, %rax                 // check whether error (negative value)
-    js 1f
+    test %rax, %rax                 // Check for error, negative value.
+    js .Lentry_error
     // release part of the alloca
     addq %rax, %rsp
     // get the code pointer
@@ -803,7 +803,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
     movq 56(%rsp), %xmm7
     addq LITERAL(64), %rsp          // floating-point done
     // native call
-    call *%rax                      // Q: is the stack aligned 16B with or without the return addr?
+    call *%rax                      // Stack should be aligned 16B without the return addr?
     // result sign extension is handled in C code
     // prepare for artQuickGenericJniEndTrampoline call
     // (Thread*, SP, result, result_f)
@@ -814,15 +814,18 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
     movq %rax, %rdx
     movq %xmm0, %rcx
     call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
-    // tear down the alloca already
+
+    // Tear down the alloca.
     movq %rbp, %rsp
     CFI_DEF_CFA_REGISTER(rsp)
-    // Exceptions possible.
+
+    // Pending exceptions possible.
     // TODO: use cmpq, needs direct encoding because of gas bug
-    movq %gs:THREAD_EXCEPTION_OFFSET, %rbx
-    test %rbx, %rbx
-    jnz 2f
-    // Tear down the callee-save frame
+    movq %gs:THREAD_EXCEPTION_OFFSET, %rcx
+    test %rcx, %rcx
+    jnz .Lexception_in_native
+
+    // Tear down the callee-save frame.
     // Load FPRs.
     // movq %xmm0, 16(%rsp)         // doesn't make sense!!!
     movq 24(%rsp), %xmm1            // neither does this!!!
@@ -850,11 +853,13 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
     // store into fpr, for when it's a fpr return...
     movq %rax, %xmm0
     ret
-1:
-    // tear down the _whole_ scratch space, assumes SIRT is empty, cookie not valid etc.
+.Lentry_error:
     movq %rbp, %rsp
-    CFI_DEF_CFA_REGISTER(rsp)
-2:  RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+.Lexception_in_native:
+    CFI_REL_OFFSET(rsp,176)
+    // TODO: the SIRT contains the this pointer which is used by the debugger for exception
+    //       delivery.
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
     DELIVER_PENDING_EXCEPTION
 END_FUNCTION art_quick_generic_jni_trampoline
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 1bbaa6a..2a77fb8 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1363,7 +1363,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
       sirt_number_of_references_++;
     }
     sirt_->SetNumberOfReferences(sirt_expected_refs_);
-
+    DCHECK_NE(sirt_expected_refs_, 0U);
     // Install Sirt.
     self->PushSirt(sirt_);
   }
@@ -1453,6 +1453,8 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
   // fix up managed-stack things in Thread
   self->SetTopOfStack(sp, 0);
 
+  self->VerifyStack();
+
   // start JNI, save the cookie
   uint32_t cookie;
   if (called->IsSynchronized()) {
@@ -1465,7 +1467,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
   } else {
     cookie = JniMethodStart(self);
   }
-  *(sp32-1) = cookie;
+  *(sp32 - 1) = cookie;
 
   // retrieve native code
   const void* nativeCode = called->GetNativeMethod();
@@ -1479,7 +1481,7 @@ extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod*
   *code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
 
   // 5K reserved, window_size used.
-  return 5*1024 - window_size;
+  return (5 * KB) - window_size;
 }
 
 /*
@@ -1491,7 +1493,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
                                                     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   mirror::ArtMethod* called = *sp;
-  uint32_t cookie = *(sp32-1);
+  uint32_t cookie = *(sp32 - 1);
 
   MethodHelper mh(called);
   char return_shorty_char = mh.GetShorty()[0];
@@ -1502,7 +1504,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
       ComputeGenericJniFrameSize fsc;
       fsc.ComputeSirtOffset();
       uint32_t offset = fsc.GetFirstSirtEntryOffset();
-      jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp)-offset);
+      jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
       return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(result.l, cookie,
                                                                               tmp, self));
@@ -1514,7 +1516,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, mirror::ArtMet
       ComputeGenericJniFrameSize fsc;
       fsc.ComputeSirtOffset();
       uint32_t offset = fsc.GetFirstSirtEntryOffset();
-      jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp)-offset);
+      jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
       JniMethodEndSynchronized(cookie, tmp, self);
     } else {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index fe27992..6b897cb 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -320,6 +320,15 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
   self->PopManagedStackFragment(fragment);
 }
 
+#ifndef NDEBUG
+size_t ArtMethod::GetSirtOffsetInBytes() {
+  CHECK(IsNative());
+  // TODO: support Sirt access from generic JNI trampoline.
+  CHECK_NE(GetEntryPointFromQuickCompiledCode(), GetQuickGenericJniTrampoline());
+  return kPointerSize;
+}
+#endif
+
 bool ArtMethod::IsRegistered() {
   void* native_method =
       GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), false);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index a61698d..8c22e67 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -342,10 +342,13 @@ class MANAGED ArtMethod : public Object {
     return GetFrameSizeInBytes() - kPointerSize;
  }
 
+#ifndef NDEBUG
+  size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#else
   size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(IsNative());
     return kPointerSize;
   }
+#endif
 
   bool IsRegistered();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index a6a0b29..abaea6f 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -108,20 +108,26 @@ mirror::Object* StackVisitor::GetThisObject() const {
     return NULL;
   } else if (m->IsNative()) {
     if (cur_quick_frame_ != NULL) {
-      StackIndirectReferenceTable* sirt =
-          reinterpret_cast<StackIndirectReferenceTable*>(
-              reinterpret_cast<char*>(cur_quick_frame_) +
-              m->GetSirtOffsetInBytes());
-      return sirt->GetReference(0);
+      if (m->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline()) {
+        UNIMPLEMENTED(ERROR) << "Failed to determine this object of native method: "
+            << PrettyMethod(m);
+        return nullptr;
+      } else {
+        StackIndirectReferenceTable* sirt =
+            reinterpret_cast<StackIndirectReferenceTable*>(
+                reinterpret_cast<char*>(cur_quick_frame_) +
+                m->GetSirtOffsetInBytes());
+        return sirt->GetReference(0);
+      }
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
     }
   } else {
     const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
     if (code_item == NULL) {
-      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method"
+      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
           << PrettyMethod(m);
-      return NULL;
+      return nullptr;
     } else {
       uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
       return reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6d8ede5..680d269 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1829,11 +1829,11 @@ Context* Thread::GetLongJumpContext() {
   return result;
 }
 
-struct CurrentMethodVisitor : public StackVisitor {
+struct CurrentMethodVisitor FINAL : public StackVisitor {
   CurrentMethodVisitor(Thread* thread, Context* context)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
-  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* m = GetMethod();
     if (m->IsRuntimeMethod()) {
       // Continue if this is a runtime method.
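
Note on the trampoline entry path above: per the stub's own comments, artQuickGenericJniTrampoline returns the number of bytes of its 5 KB reserved scratch area that the stub may release back to the stack pointer (addq %rax, %rsp), and a negative value signals an error, which `test %rax, %rax; js .Lentry_error` catches. A minimal sketch of that contract, not ART code; KB matches the constant the diff switches to, while window_size and ComputeAllocaRelease are illustrative names:

#include <cstddef>
#include <cstdio>

constexpr std::size_t KB = 1024;

// window_size: bytes of the reserved area the native call actually needs
// (illustrative stand-in for the value computed by the frame-size code).
std::ptrdiff_t ComputeAllocaRelease(std::size_t window_size) {
  if (window_size > 5 * KB) {
    return -1;  // negative: the assembly stub branches to its error label
  }
  // Unused scratch bytes; the stub hands them back with addq %rax, %rsp.
  return static_cast<std::ptrdiff_t>(5 * KB - window_size);
}

int main() {
  std::printf("%td\n", ComputeAllocaRelease(512));     // 4608 bytes released
  std::printf("%td\n", ComputeAllocaRelease(8 * KB));  // -1, error path taken
  return 0;
}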
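The stack.cc and art_method.cc hunks rely on the frame layout for registered native methods: the SIRT sits GetSirtOffsetInBytes() (one pointer, i.e. past the ArtMethod* slot) into the quick frame, and its reference 0 is the receiver. A hedged sketch of that lookup; Object, ArtMethod, and Sirt below are stand-ins, not the real mirror::ArtMethod or StackIndirectReferenceTable:

#include <cstddef>
#include <cstdint>
#include <iostream>

struct Object {};     // stand-in for mirror::Object
struct ArtMethod {};  // stand-in for mirror::ArtMethod
constexpr std::size_t kPointerSize = sizeof(void*);

struct Sirt {  // stand-in for StackIndirectReferenceTable
  std::uint32_t number_of_references;
  Object* references[4];
  Object* GetReference(std::size_t i) const { return references[i]; }
};

int main() {
  // Fake quick frame: the ArtMethod* slot first, the SIRT immediately
  // after it, i.e. at GetSirtOffsetInBytes() == kPointerSize.
  struct Frame {
    ArtMethod* method;
    Sirt sirt;
  } frame;

  ArtMethod method;
  Object receiver;
  frame.method = &method;
  frame.sirt.number_of_references = 1;
  frame.sirt.references[0] = &receiver;  // slot 0 holds "this"

  // What StackVisitor::GetThisObject does for a plain native frame:
  Sirt* sirt = reinterpret_cast<Sirt*>(
      reinterpret_cast<char*>(&frame) + kPointerSize);
  std::cout << (sirt->GetReference(0) == &receiver) << "\n";  // prints 1
  return 0;
}

The commit's point is that this shortcut is not yet valid for frames built by the generic JNI trampoline, so the debug build now CHECKs the entry point in GetSirtOffsetInBytes() and the stack walker reports UNIMPLEMENTED instead of reading a bogus slot.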