diff options
author | Vladimir Marko <vmarko@google.com> | 2014-05-02 14:40:15 +0100 |
---|---|---|
committer | Vladimir Marko <vmarko@google.com> | 2014-05-13 11:43:22 +0100 |
commit | 7624d25dad2d1ba25969ae704fccf68649103ae5 (patch) | |
tree | de72194b76a4e23e0b15ec4085447ae7e4425815 /runtime/mirror | |
parent | e1910f1d802dff79bba5ef61e1c4fd0b95f6e5b0 (diff) | |
download | art-7624d25dad2d1ba25969ae704fccf68649103ae5.zip art-7624d25dad2d1ba25969ae704fccf68649103ae5.tar.gz art-7624d25dad2d1ba25969ae704fccf68649103ae5.tar.bz2 |
Move quick frame info to OatQuickMethodHeader.
Rename OatMethodHeader to OatQuickMethodHeader and move frame
info from OatMethodOffsets to OatQuickMethodHeader. Retrieve
the info from other places for non-quick methods (portable-compiled
bytecode or JNI stub, generic JNI, runtime, abstract and
proxy methods).
This change has a libcore/ companion CL
"Remove ArtMethod's quick fields for frame size and spills."
https://android-review.googlesource.com/94164
Bug: 11767815
Change-Id: I0e31a7875d76732e1ec479c86b9b5ca01203507f
Diffstat (limited to 'runtime/mirror')
-rw-r--r-- | runtime/mirror/art_method-inl.h | 39 | ||||
-rw-r--r-- | runtime/mirror/art_method.cc | 4 | ||||
-rw-r--r-- | runtime/mirror/art_method.h | 44 |
3 files changed, 42 insertions, 45 deletions
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h index fb9a09a..91753df 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/mirror/art_method-inl.h @@ -23,7 +23,8 @@ #include "entrypoints/entrypoint_utils.h" #include "object_array.h" #include "oat.h" -#include "runtime.h" +#include "quick/quick_method_frame_info.h" +#include "runtime-inl.h" namespace art { namespace mirror { @@ -81,7 +82,7 @@ inline uint32_t ArtMethod::GetCodeSize() { if (code == nullptr) { return 0u; } - return reinterpret_cast<const OatMethodHeader*>(code)[-1].code_size_; + return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_; } inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) { @@ -201,6 +202,40 @@ inline void ArtMethod::SetNativeMethod(const void* native_method) { OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method); } +inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { + if (UNLIKELY(IsPortableCompiled())) { + // Portable compiled dex bytecode or jni stub. + return QuickMethodFrameInfo(kStackAlignment, 0u, 0u); + } + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod())) { + return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); + } + if (UNLIKELY(IsRuntimeMethod())) { + return runtime->GetRuntimeMethodFrameInfo(this); + } + + const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this); + // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method + // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline) + // for non-native methods. And we really shouldn't see a failure for non-native methods here. + DCHECK(entry_point != GetQuickToInterpreterBridgeTrampoline(runtime->GetClassLinker())); + CHECK(entry_point != GetQuickToInterpreterBridge()); + + if (UNLIKELY(entry_point == GetQuickGenericJniTrampoline())) { + // Generic JNI frame. 
+ DCHECK(IsNative()); + uint32_t sirt_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1; + size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(sirt_refs); + QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs); + return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + sirt_size, + callee_info.CoreSpillMask(), callee_info.FpSpillMask()); + } + + const void* code_pointer = EntryPointToCodePointer(entry_point); + return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_; +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc index 4275f25..eef60f7 100644 --- a/runtime/mirror/art_method.cc +++ b/runtime/mirror/art_method.cc @@ -389,7 +389,7 @@ const uint8_t* ArtMethod::GetMappingTable() { if (code == nullptr) { return nullptr; } - uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_; + uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_; if (UNLIKELY(offset == 0u)) { return nullptr; } @@ -401,7 +401,7 @@ const uint8_t* ArtMethod::GetVmapTable() { if (code == nullptr) { return nullptr; } - uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_; + uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_; if (UNLIKELY(offset == 0u)) { return nullptr; } diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h index 71f0210..49d22ab 100644 --- a/runtime/mirror/art_method.h +++ b/runtime/mirror/art_method.h @@ -23,6 +23,7 @@ #include "modifiers.h" #include "object.h" #include "object_callbacks.h" +#include "quick/quick_method_frame_info.h" namespace art { @@ -318,19 +319,14 @@ class MANAGED ArtMethod : public Object { template <bool kCheckFrameSize = true> uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t 
result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_)); + uint32_t result = GetQuickFrameInfo().FrameSizeInBytes(); if (kCheckFrameSize) { DCHECK_LE(static_cast<size_t>(kStackAlignment), result); } return result; } - void SetFrameSizeInBytes(size_t new_frame_size_in_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Not called within a transaction. - SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_), - new_frame_size_in_bytes); - } + QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return GetFrameSizeInBytes() - kPointerSize; @@ -362,26 +358,6 @@ class MANAGED ArtMethod : public Object { return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_); } - uint32_t GetCoreSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_)); - } - - void SetCoreSpillMask(uint32_t core_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Computed during compilation. - // Not called within a transaction. - SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask); - } - - uint32_t GetFpSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_)); - } - - void SetFpSpillMask(uint32_t fp_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Computed during compilation. - // Not called within a transaction. - SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask); - } - // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal // conventions for a method of managed code. Returns false for Proxy methods. 
bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -474,20 +450,6 @@ class MANAGED ArtMethod : public Object { // ifTable. uint32_t method_index_; - // --- Quick compiler meta-data. --- - // TODO: merge and place in native heap, such as done with the code size. - - // Bit map of spilled machine registers. - uint32_t quick_core_spill_mask_; - - // Bit map of spilled floating point machine registers. - uint32_t quick_fp_spill_mask_; - - // Fixed frame size for this method when executed. - uint32_t quick_frame_size_in_bytes_; - - // --- End of quick compiler meta-data. --- - static Class* java_lang_reflect_ArtMethod_; private: |