Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc      |  4
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc  |  3
-rw-r--r--  runtime/art_method.cc                           | 47
-rw-r--r--  runtime/mirror/class.cc                         |  6
-rw-r--r--  runtime/stack_map.h                             | 10
-rw-r--r--  runtime/verifier/reg_type.cc                    | 10
6 files changed, 51 insertions, 29 deletions
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index ff04106..09a018e 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -267,6 +267,10 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
   static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
 
+  // Deoptimize
+  qpoints->pDeoptimize = art_quick_deoptimize;
+  static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
+
   // Atomic 64-bit load/store
   qpoints->pA64Load = QuasiAtomic::Read64;
   static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 321c27b..4904af9 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -176,6 +176,9 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
   qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 
+  // Deoptimize
+  qpoints->pDeoptimize = art_quick_deoptimize;
+
   // TODO - use lld/scd instructions for Mips64
   // Atomic 64-bit load/store
   qpoints->pA64Load = QuasiAtomic::Read64;
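Context for the two hunks above: qpoints is ART's per-thread table of quick entrypoints, a struct of function pointers that compiled code calls through at fixed offsets, so a slot that is never assigned is a jump through garbage rather than a clean error. A minimal standalone sketch of the idea; QuickEntryPoints and the stub here are simplified stand-ins for illustration, not ART's real definitions:

#include <cstdio>

// Simplified stand-in for ART's QuickEntryPoints: a struct of function
// pointers that generated code invokes by fixed offset.
struct QuickEntryPoints {
  void (*pDeoptimize)();
};

// Stand-in for the art_quick_deoptimize assembly stub.
static void FakeDeoptimize() { std::puts("deoptimize: back to the interpreter"); }

int main() {
  QuickEntryPoints qpoints = {};  // pDeoptimize starts out null
  // The substance of this change: wire the slot up during entry point
  // initialization so a deoptimization request on MIPS/MIPS64 lands in
  // the stub instead of jumping through a null pointer.
  qpoints.pDeoptimize = FakeDeoptimize;
  qpoints.pDeoptimize();
  return 0;
}

The 32-bit MIPS file also carries a static_assert, presumably because that port classifies every entrypoint as direct or non-direct (the surrounding asserts follow the same pattern); the mips64 file does not make that distinction.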
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 16c099d..8619503 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -182,29 +182,32 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
   if (IsOptimized(sizeof(void*))) {
     CodeInfo code_info = GetOptimizedCodeInfo();
-    return code_info.GetStackMapForNativePcOffset(sought_offset).GetDexPc(code_info);
-  }
-
-  MappingTable table(entry_point != nullptr ?
-      GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
-  if (table.TotalSize() == 0) {
-    // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
-    // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
-    DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
-    return DexFile::kDexNoIndex;   // Special no mapping case
-  }
-  // Assume the caller wants a pc-to-dex mapping so check here first.
-  typedef MappingTable::PcToDexIterator It;
-  for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
-    if (cur.NativePcOffset() == sought_offset) {
-      return cur.DexPc();
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
+    if (stack_map.IsValid()) {
+      return stack_map.GetDexPc(code_info);
     }
-  }
-  // Now check dex-to-pc mappings.
-  typedef MappingTable::DexToPcIterator It2;
-  for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
-    if (cur.NativePcOffset() == sought_offset) {
-      return cur.DexPc();
+  } else {
+    MappingTable table(entry_point != nullptr ?
+        GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
+    if (table.TotalSize() == 0) {
+      // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+      // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+      DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
+      return DexFile::kDexNoIndex;   // Special no mapping case
+    }
+    // Assume the caller wants a pc-to-dex mapping so check here first.
+    typedef MappingTable::PcToDexIterator It;
+    for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+      if (cur.NativePcOffset() == sought_offset) {
+        return cur.DexPc();
+      }
+    }
+    // Now check dex-to-pc mappings.
+    typedef MappingTable::DexToPcIterator It2;
+    for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+      if (cur.NativePcOffset() == sought_offset) {
+        return cur.DexPc();
+      }
     }
   }
   if (abort_on_failure) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f0b7bfd..5bd6583 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -471,7 +471,8 @@ ArtMethod* Class::FindDirectMethod(
 ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
                                             size_t pointer_size) {
   for (auto& method : GetVirtualMethods(pointer_size)) {
-    if (name == method.GetName() && method.GetSignature() == signature) {
+    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+    if (name == np_method->GetName() && np_method->GetSignature() == signature) {
       return &method;
     }
   }
@@ -481,7 +482,8 @@ ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Strin
 ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
                                             size_t pointer_size) {
   for (auto& method : GetVirtualMethods(pointer_size)) {
-    if (name == method.GetName() && signature == method.GetSignature()) {
+    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+    if (name == np_method->GetName() && signature == np_method->GetSignature()) {
       return &method;
     }
   }
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 6cc1709..71e38ff 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -681,8 +681,12 @@ class DexRegisterMap {
  */
 class StackMap {
  public:
+  StackMap() {}
+
   explicit StackMap(MemoryRegion region) : region_(region) {}
 
+  bool IsValid() const { return region_.pointer() != nullptr; }
+
   uint32_t GetDexPc(const CodeInfo& info) const;
 
   void SetDexPc(const CodeInfo& info, uint32_t dex_pc);
@@ -975,8 +979,7 @@ class CodeInfo {
         return stack_map;
       }
     }
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
+    return StackMap();
   }
 
   StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
@@ -987,8 +990,7 @@ class CodeInfo {
         return stack_map;
       }
     }
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
+    return StackMap();
   }
 
   void Dump(std::ostream& os, uint16_t number_of_dex_registers) const;
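The stack_map.h hunks replace two "impossible" aborts with an invalid sentinel, and the art_method.cc hunk is the matching caller-side change: a miss in the optimized code's stack maps now tests IsValid() and falls through to the mapping-table path instead of crashing. A toy model of the idiom; MemoryRegion, StackMap, and FindStackMap below are reduced stand-ins (C++14), not the real ART types:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy stand-in for ART's MemoryRegion: a pointer plus a size.
struct MemoryRegion {
  uint8_t* pointer_ = nullptr;
  size_t size_ = 0;
  uint8_t* pointer() const { return pointer_; }
};

class StackMap {
 public:
  StackMap() {}  // default-constructed map is the "invalid" sentinel
  explicit StackMap(MemoryRegion region) : region_(region) {}
  bool IsValid() const { return region_.pointer() != nullptr; }

 private:
  MemoryRegion region_;
};

// A lookup that previously hit LOG(FATAL) on a miss now hands back the
// sentinel, so a caller such as ArtMethod::ToDexPc can test IsValid()
// and try another lookup strategy instead of aborting the runtime.
StackMap FindStackMap(bool hit, MemoryRegion region) {
  return hit ? StackMap(region) : StackMap();
}

int main() {
  uint8_t storage[16];
  assert(FindStackMap(true, MemoryRegion{storage, sizeof(storage)}).IsValid());
  assert(!FindStackMap(false, MemoryRegion{}).IsValid());
  return 0;
}

This is the usual design choice for lookups that can legitimately miss: make the failure value explicit and cheap to test rather than asserting unreachability.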
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index c8aa4fd..1435607 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -585,7 +585,15 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
   // Perform pointer equality tests for conflict to avoid virtual method dispatch.
   const ConflictType& conflict = reg_types->Conflict();
-  if (this == &conflict) {
+  if (IsUndefined() || incoming_type.IsUndefined()) {
+    // There is a difference between undefined and conflict. Conflicts may be copied around, but
+    // not used. Undefined registers must not be copied. So any merge with undefined should return
+    // undefined.
+    if (IsUndefined()) {
+      return *this;
+    }
+    return incoming_type;
+  } else if (this == &conflict) {
     DCHECK(IsConflict());
     return *this;  // Conflict MERGE * => Conflict
   } else if (&incoming_type == &conflict) {
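The verifier change carves undefined out of the merge before the conflict fast paths: conflicts may be copied around (just not used), while undefined registers must not even be copied, so any merge involving undefined must stay undefined. A toy model of that lattice rule; the enum and Merge below are illustrative, not the verifier's real RegType hierarchy:

#include <cassert>

// Toy model of the lattice points involved: undefined and conflict are
// distinct, and undefined is "stickier" than conflict.
enum class RegKind { kUndefined, kConflict, kInteger };

// Mirrors the new rule: check undefined first, so it absorbs every
// merge; only then fall back to the conflict cases.
RegKind Merge(RegKind a, RegKind b) {
  if (a == RegKind::kUndefined || b == RegKind::kUndefined) {
    return RegKind::kUndefined;  // undefined MERGE * => undefined
  }
  if (a == RegKind::kConflict || b == RegKind::kConflict) {
    return RegKind::kConflict;   // Conflict MERGE * => Conflict
  }
  return a == b ? a : RegKind::kConflict;
}

int main() {
  // Before this change, undefined MERGE conflict produced conflict,
  // which a later copy could launder into something usable; now the
  // result stays undefined.
  assert(Merge(RegKind::kUndefined, RegKind::kConflict) == RegKind::kUndefined);
  assert(Merge(RegKind::kInteger, RegKind::kConflict) == RegKind::kConflict);
  return 0;
}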