Diffstat (limited to 'compiler/dex')
27 files changed, 467 insertions, 201 deletions
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 54e34ea..b91c3ca 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -142,7 +142,7 @@ class GlobalValueNumberingTest : public testing::Test {
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 954e9f1..4d2b8b3 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -143,7 +143,7 @@ class GvnDeadCodeEliminationTest : public testing::Test {
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 97ea05a..379c952 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,8 @@
 
 #include "base/arena_object.h"
 #include "base/logging.h"
+#include "dex_instruction_utils.h"
 #include "global_value_numbering.h"
-#include "utils/dex_instruction_utils.h"
 
 namespace art {
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index d1c3a6b..566527a 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -96,7 +96,7 @@ class LocalValueNumberingTest : public testing::Test {
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 31dbc60..a89b250 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -416,8 +416,8 @@ static const uint16_t kAnalysisAttributes[kMirOpLast] = {
   // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
   kAnInvoke | kAnHeavyWeight,
 
-  // 73 UNUSED_73
-  kAnNone,
+  // 73 RETURN_VOID_BARRIER
+  kAnBranch,
 
   // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
   kAnInvoke | kAnHeavyWeight,
@@ -752,88 +752,88 @@ static const uint16_t kAnalysisAttributes[kMirOpLast] = {
   // E2 USHR_INT_LIT8 vAA, vBB, #+CC
   kAnMath | kAnInt,
 
-  // E3 IGET_VOLATILE
+  // E3 IGET_QUICK
   kAnNone,
 
-  // E4 IPUT_VOLATILE
+  // E4 IGET_WIDE_QUICK
   kAnNone,
 
-  // E5 SGET_VOLATILE
+  // E5 IGET_OBJECT_QUICK
   kAnNone,
 
-  // E6 SPUT_VOLATILE
+  // E6 IPUT_QUICK
   kAnNone,
 
-  // E7 IGET_OBJECT_VOLATILE
+  // E7 IPUT_WIDE_QUICK
   kAnNone,
 
-  // E8 IGET_WIDE_VOLATILE
+  // E8 IPUT_OBJECT_QUICK
   kAnNone,
 
-  // E9 IPUT_WIDE_VOLATILE
-  kAnNone,
+  // E9 INVOKE_VIRTUAL_QUICK
+  kAnInvoke | kAnHeavyWeight,
 
-  // EA SGET_WIDE_VOLATILE
-  kAnNone,
+  // EA INVOKE_VIRTUAL_RANGE_QUICK
+  kAnInvoke | kAnHeavyWeight,
 
-  // EB SPUT_WIDE_VOLATILE
+  // EB IPUT_BOOLEAN_QUICK
   kAnNone,
 
-  // EC BREAKPOINT
+  // EC IPUT_BYTE_QUICK
   kAnNone,
 
-  // ED THROW_VERIFICATION_ERROR
-  kAnHeavyWeight | kAnBranch,
+  // ED IPUT_CHAR_QUICK
+  kAnNone,
 
-  // EE EXECUTE_INLINE
+  // EE IPUT_SHORT_QUICK
   kAnNone,
 
-  // EF EXECUTE_INLINE_RANGE
+  // EF IGET_BOOLEAN_QUICK
   kAnNone,
 
-  // F0 INVOKE_OBJECT_INIT_RANGE
-  kAnInvoke | kAnHeavyWeight,
+  // F0 IGET_BYTE_QUICK
+  kAnNone,
 
-  // F1 RETURN_VOID_BARRIER
-  kAnBranch,
+  // F1 IGET_CHAR_QUICK
+  kAnNone,
 
-  // F2 IGET_QUICK
+  // F2 IGET_SHORT_QUICK
   kAnNone,
 
-  // F3 IGET_WIDE_QUICK
+  // F3 UNUSED_F3
   kAnNone,
 
-  // F4 IGET_OBJECT_QUICK
+  // F4 UNUSED_F4
   kAnNone,
 
-  // F5 IPUT_QUICK
+  // F5 UNUSED_F5
   kAnNone,
 
-  // F6 IPUT_WIDE_QUICK
+  // F6 UNUSED_F6
   kAnNone,
 
-  // F7 IPUT_OBJECT_QUICK
+  // F7 UNUSED_F7
   kAnNone,
 
-  // F8 INVOKE_VIRTUAL_QUICK
-  kAnInvoke | kAnHeavyWeight,
+  // F8 UNUSED_F8
+  kAnNone,
 
-  // F9 INVOKE_VIRTUAL_QUICK_RANGE
-  kAnInvoke | kAnHeavyWeight,
+  // F9 UNUSED_F9
+  kAnNone,
 
-  // FA INVOKE_SUPER_QUICK
-  kAnInvoke | kAnHeavyWeight,
+  // FA UNUSED_FA
+  kAnNone,
 
-  // FB INVOKE_SUPER_QUICK_RANGE
-  kAnInvoke | kAnHeavyWeight,
+  // FB UNUSED_FB
+  kAnNone,
 
-  // FC IPUT_OBJECT_VOLATILE
+  // FC UNUSED_FC
   kAnNone,
 
-  // FD SGET_OBJECT_VOLATILE
+  // FD UNUSED_FD
   kAnNone,
 
-  // FE SPUT_OBJECT_VOLATILE
+  // FE UNUSED_FE
   kAnNone,
 
   // FF UNUSED_FF
@@ -1203,12 +1203,13 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
 }
 
 void MIRGraph::DoCacheFieldLoweringInfo() {
+  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
   // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
   const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
   ScopedArenaAllocator allocator(&cu_->arena_stack);
-  uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
-  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);
-
+  auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
+  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
+      max_refs, kArenaAllocMisc);
   // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
   size_t ifield_pos = 0u;
   size_t sfield_pos = max_refs;
@@ -1221,23 +1222,36 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
       // Get field index and try to find it among existing indexes. If found, it's usually among
       // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
       // is a linear search, it actually performs much better than map based approach.
-      if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
-        uint16_t field_idx = mir->dalvikInsn.vC;
+      const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
+      const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
+      if (is_iget_or_iput || is_iget_or_iput_quick) {
+        uint32_t field_idx;
+        DexMemAccessType access_type;
+        if (is_iget_or_iput) {
+          field_idx = mir->dalvikInsn.vC;
+          access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+        } else {
+          DCHECK(is_iget_or_iput_quick);
+          // Set kFieldIndexFlagQuickened so that we don't deduplicate against non quickened field
+          // indexes.
+          field_idx = mir->offset | kFieldIndexFlagQuickened;
+          access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
+        }
         size_t i = ifield_pos;
         while (i != 0u && field_idxs[i - 1] != field_idx) {
           --i;
         }
         if (i != 0u) {
           mir->meta.ifield_lowering_info = i - 1;
-          DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
+          DCHECK_EQ(field_types[i - 1], access_type);
         } else {
           mir->meta.ifield_lowering_info = ifield_pos;
           field_idxs[ifield_pos] = field_idx;
-          field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+          field_types[ifield_pos] = access_type;
           ++ifield_pos;
         }
       } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-        uint16_t field_idx = mir->dalvikInsn.vB;
+        auto field_idx = mir->dalvikInsn.vB;
         size_t i = sfield_pos;
         while (i != max_refs && field_idxs[i] != field_idx) {
           ++i;
         }
@@ -1261,7 +1275,12 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
     DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
     ifield_lowering_infos_.reserve(ifield_pos);
     for (size_t pos = 0u; pos != ifield_pos; ++pos) {
-      ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
+      const uint32_t field_idx = field_idxs[pos];
+      const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
+      const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
+      CHECK_LT(masked_field_idx, 1u << 16);
+      ifield_lowering_infos_.push_back(
+          MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
     }
     MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                    ifield_lowering_infos_.data(), ifield_pos);
@@ -1282,18 +1301,19 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
 
 void MIRGraph::DoCacheMethodLoweringInfo() {
   static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
+  static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
 
   // Embed the map value in the entry to avoid extra padding in 64-bit builds.
   struct MapEntry {
     // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
     const MethodReference* devirt_target;
-    uint16_t target_method_idx;
+    uint32_t target_method_idx;
+    uint32_t vtable_idx;
     uint16_t invoke_type;
     // Map value.
     uint32_t lowering_info_index;
   };
-
   // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
   struct MapEntryComparator {
     bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
       if (lhs.target_method_idx != rhs.target_method_idx) {
         return lhs.target_method_idx < rhs.target_method_idx;
       }
@@ -1302,6 +1322,9 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
       if (lhs.invoke_type != rhs.invoke_type) {
         return lhs.invoke_type < rhs.invoke_type;
       }
+      if (lhs.vtable_idx != rhs.vtable_idx) {
+        return lhs.vtable_idx < rhs.vtable_idx;
+      }
       if (lhs.devirt_target != rhs.devirt_target) {
         if (lhs.devirt_target == nullptr) {
           return true;
@@ -1319,7 +1342,7 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
   ScopedArenaAllocator allocator(&cu_->arena_stack);
 
   // All INVOKE instructions take 3 code units and there must also be a RETURN.
-  uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
+  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
 
   // Map invoke key (see MapEntry) to lowering info index and vice versa.
   // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
@@ -1330,28 +1353,43 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
       allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
 
   // Find INVOKE insns and their devirtualization targets.
+  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     if (bb->block_type != kDalvikByteCode) {
       continue;
     }
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
-        // Decode target method index and invoke type.
-        uint16_t target_method_idx = mir->dalvikInsn.vB;
-        DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
-
+      const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
+      const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
+      if (is_quick_invoke || is_invoke) {
+        uint32_t vtable_index = 0;
+        uint32_t target_method_idx = 0;
+        uint32_t invoke_type_idx = 0;  // Default to virtual (in case of quickened).
+        DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
+        if (is_quick_invoke) {
+          // We need to store the vtable index since we can't necessarily recreate it at resolve
+          // phase if the dequickening resolved to an interface method.
+          vtable_index = mir->dalvikInsn.vB;
+          // Fake up the method index by storing the mir offset so that we can read the dequicken
+          // info in resolve.
+          target_method_idx = mir->offset | kMethodIdxFlagQuickened;
+        } else {
+          DCHECK(is_invoke);
+          // Decode target method index and invoke type.
+          invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
+          target_method_idx = mir->dalvikInsn.vB;
+        }
         // Find devirtualization target.
         // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
         // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
        // and increment it as needed instead of making O(log n) lookups.
-        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
         const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
-
         // Try to insert a new entry. If the insertion fails, we will have found an old one.
         MapEntry entry = {
             devirt_target,
             target_method_idx,
+            vtable_index,
            invoke_types[invoke_type_idx],
            static_cast<uint32_t>(invoke_map.size())
        };
@@ -1362,22 +1400,24 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
       }
     }
   }
-
   if (invoke_map.empty()) {
     return;
   }
-
   // Prepare unique method infos, set method info indexes for their MIRs.
-  DCHECK_EQ(method_lowering_infos_.size(), 0u);
   const size_t count = invoke_map.size();
   method_lowering_infos_.reserve(count);
   for (size_t pos = 0u; pos != count; ++pos) {
     const MapEntry* entry = sequential_entries[pos];
-    MirMethodLoweringInfo method_info(entry->target_method_idx,
-                                      static_cast<InvokeType>(entry->invoke_type));
+    const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
+    const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
+    MirMethodLoweringInfo method_info(masked_method_idx,
+                                      static_cast<InvokeType>(entry->invoke_type), is_quick);
     if (entry->devirt_target != nullptr) {
       method_info.SetDevirtualizationTarget(*entry->devirt_target);
     }
+    if (is_quick) {
+      method_info.SetVTableIndex(entry->vtable_idx);
+    }
     method_lowering_infos_.push_back(method_info);
   }
   MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
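
Note on the key-tagging scheme above: a quickened instruction no longer carries a dex field/method index, so DoCacheFieldLoweringInfo()/DoCacheMethodLoweringInfo() use the mir offset as the dedup key instead, with bit 31 (kFieldIndexFlagQuickened / kMethodIdxFlagQuickened) set so a tagged offset can never collide with a real 16-bit index. A minimal standalone C++ sketch of the encode/decode round trip (names are illustrative, not the ART API):

#include <cassert>
#include <cstdint>

constexpr uint32_t kQuickenedFlag = 0x80000000u;  // Bit 31: "this key is a mir offset".

// Real dex indexes fit in 16 bits, so a tagged mir offset can never alias one.
uint32_t MakeKey(bool is_quickened, uint32_t idx_or_offset) {
  return is_quickened ? (idx_or_offset | kQuickenedFlag) : idx_or_offset;
}

bool IsQuickenedKey(uint32_t key) { return (key & kQuickenedFlag) != 0u; }
uint32_t MaskedIndex(uint32_t key) { return key & ~kQuickenedFlag; }

int main() {
  uint32_t normal = MakeKey(false, 0x1234u);  // A real field index.
  uint32_t quick = MakeKey(true, 0x1234u);    // Same numeric value, but a mir offset.
  assert(normal != quick);                    // No false deduplication between the two kinds.
  assert(IsQuickenedKey(quick) && !IsQuickenedKey(normal));
  assert(MaskedIndex(quick) == 0x1234u);      // The CHECK_LT(masked, 1u << 16) above holds.
  return 0;
}

This is also why the key arrays widen from uint16_t to uint32_t in the hunk above: the tag bit does not fit in 16 bits.
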
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index f9f7e22..dfaff6c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -374,7 +374,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
   // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
 
-  // 73 UNUSED_73
+  // 73 RETURN_VOID_BARRIER
   DF_NOP,
 
   // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
@@ -710,89 +710,89 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
   // E2 USHR_INT_LIT8 vAA, vBB, #+CC
   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
 
-  // E3 IGET_VOLATILE
+  // E3 IGET_QUICK
   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E4 IPUT_VOLATILE
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E5 SGET_VOLATILE
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // E6 SPUT_VOLATILE
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // E4 IGET_WIDE_QUICK
+  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E7 IGET_OBJECT_VOLATILE
+  // E5 IGET_OBJECT_QUICK
   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E8 IGET_WIDE_VOLATILE
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+  // E6 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E9 IPUT_WIDE_VOLATILE
+  // E7 IPUT_WIDE_QUICK
   DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // EA SGET_WIDE_VOLATILE
-  DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // E8 IPUT_OBJECT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // EB SPUT_WIDE_VOLATILE
-  DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // E9 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
 
-  // EC BREAKPOINT
-  DF_NOP,
+  // EA INVOKE_VIRTUAL_RANGE_QUICK
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
 
-  // ED THROW_VERIFICATION_ERROR
-  DF_NOP | DF_UMS,
+  // EB IPUT_BOOLEAN_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // EE EXECUTE_INLINE
-  DF_FORMAT_35C,
+  // EC IPUT_BYTE_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // EF EXECUTE_INLINE_RANGE
-  DF_FORMAT_3RC,
+  // ED IPUT_CHAR_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F0 INVOKE_OBJECT_INIT_RANGE
-  DF_NOP,
+  // EE IPUT_SHORT_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F1 RETURN_VOID_BARRIER
-  DF_NOP,
+  // EF IGET_BOOLEAN_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F2 IGET_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F0 IGET_BYTE_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F3 IGET_WIDE_QUICK
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F1 IGET_CHAR_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F4 IGET_OBJECT_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F2 IGET_SHORT_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // F5 IPUT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F3 UNUSED_F3
+  DF_NOP,
 
-  // F6 IPUT_WIDE_QUICK
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F4 UNUSED_F4
+  DF_NOP,
 
-  // F7 IPUT_OBJECT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
+  // F5 UNUSED_F5
+  DF_NOP,
 
-  // F8 INVOKE_VIRTUAL_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+  // F6 UNUSED_F6
+  DF_NOP,
 
-  // F9 INVOKE_VIRTUAL_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+  // F7 UNUSED_F7
+  DF_NOP,
 
-  // FA INVOKE_SUPER_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+  // F8 UNUSED_F8
+  DF_NOP,
 
-  // FB INVOKE_SUPER_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+  // F9 UNUSED_F9
+  DF_NOP,
 
-  // FC IPUT_OBJECT_VOLATILE
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+  // FA UNUSED_FA
+  DF_NOP,
 
-  // FD SGET_OBJECT_VOLATILE
-  DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // FB UNUSED_FB
+  DF_NOP,
 
-  // FE SPUT_OBJECT_VOLATILE
-  DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // FC UNUSED_FC
+  DF_NOP,
+
+  // FD UNUSED_FD
+  DF_NOP,
+
+  // FE UNUSED_FE
+  DF_NOP,
 
   // FF UNUSED_FF
   DF_NOP,
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 53afcad..d2079a2 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -35,8 +35,9 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
     DCHECK(field_infos != nullptr);
     DCHECK_NE(count, 0u);
     for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-      MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
-      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+      MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType(), it->IsQuickened());
+      unresolved.field_offset_ = it->field_offset_;
+      unresolved.CheckEquals(*it);
     }
   }
 
@@ -49,13 +50,30 @@ void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
       hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
   Handle<mirror::Class> referrer_class(hs.NewHandle(
       compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
   // Even if the referrer class is unresolved (i.e. we're compiling a method without class
   // definition) we still want to resolve fields and record all available info.
-
   for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-    uint32_t field_idx = it->field_idx_;
-    mirror::ArtField* resolved_field =
-        compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+    uint32_t field_idx;
+    mirror::ArtField* resolved_field;
+    if (!it->IsQuickened()) {
+      field_idx = it->field_idx_;
+      resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
+                                                     field_idx, false);
+    } else {
+      const auto mir_offset = it->field_idx_;
+      // For quickened instructions, it->field_offset_ actually contains the mir offset.
+      // We need to use the de-quickening info to get dex file / field idx
+      auto* field_idx_ptr = verified_method->GetDequickenIndex(mir_offset);
+      CHECK(field_idx_ptr != nullptr);
+      field_idx = field_idx_ptr->index;
+      StackHandleScope<1> hs2(soa.Self());
+      auto h_dex_cache = hs2.NewHandle(compiler_driver->FindDexCache(field_idx_ptr->dex_file));
+      resolved_field = compiler_driver->ResolveFieldWithDexFile(
+          soa, h_dex_cache, class_loader, field_idx_ptr->dex_file, field_idx, false);
+      // Since we don't have a valid field index we can't go slow path later.
+      CHECK(resolved_field != nullptr);
+    }
     if (UNLIKELY(resolved_field == nullptr)) {
       continue;
     }
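
Why the memcmp-based DCHECK above became a field-wise CheckEquals(): raw-byte comparison also sees bytes that carry no meaning (padding, don't-care members), so two logically equal objects can compare unequal. A small self-contained C++ illustration of the failure mode (the struct is a stand-in, not the ART class; the explicit padding member models compiler-inserted padding):

#include <cstdint>
#include <cstring>

struct Info {
  uint16_t field_idx;
  uint16_t padding;  // Stands in for compiler-inserted padding / don't-care state.
  uint32_t flags;

  bool CheckEquals(const Info& other) const {
    // Compare only the meaningful members, as the new CheckEquals() does.
    return field_idx == other.field_idx && flags == other.flags;
  }
};

int main() {
  Info a{1u, 0x0000u, 42u};
  Info b{1u, 0xFFFFu, 42u};  // Same logical value, different don't-care bytes.
  bool bytes_equal = std::memcmp(&a, &b, sizeof(Info)) == 0;  // false
  bool fields_equal = a.CheckEquals(b);                       // true
  return (fields_equal && !bytes_equal) ? 0 : 1;
}
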
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 98b2da8..ca56958 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -19,8 +19,8 @@
 
 #include "base/macros.h"
 #include "dex_file.h"
+#include "dex_instruction_utils.h"
 #include "offsets.h"
-#include "utils/dex_instruction_utils.h"
 
 namespace art {
 
@@ -39,6 +39,9 @@ class MirFieldInfo {
   uint16_t FieldIndex() const {
     return field_idx_;
   }
+  void SetFieldIndex(uint16_t field_idx) {
+    field_idx_ = field_idx;
+  }
 
   bool IsStatic() const {
     return (flags_ & kFlagIsStatic) != 0u;
@@ -51,6 +54,9 @@ class MirFieldInfo {
   const DexFile* DeclaringDexFile() const {
     return declaring_dex_file_;
   }
+  void SetDeclaringDexFile(const DexFile* dex_file) {
+    declaring_dex_file_ = dex_file;
+  }
 
   uint16_t DeclaringClassIndex() const {
     return declaring_class_idx_;
@@ -64,20 +70,35 @@ class MirFieldInfo {
     return (flags_ & kFlagIsVolatile) != 0u;
   }
 
+  // IGET_QUICK, IGET_BYTE_QUICK, ...
+  bool IsQuickened() const {
+    return (flags_ & kFlagIsQuickened) != 0u;
+  }
+
   DexMemAccessType MemAccessType() const {
     return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
   }
 
+  void CheckEquals(const MirFieldInfo& other) const {
+    CHECK_EQ(field_idx_, other.field_idx_);
+    CHECK_EQ(flags_, other.flags_);
+    CHECK_EQ(declaring_field_idx_, other.declaring_field_idx_);
+    CHECK_EQ(declaring_class_idx_, other.declaring_class_idx_);
+    CHECK_EQ(declaring_dex_file_, other.declaring_dex_file_);
+  }
+
  protected:
   enum {
     kBitIsStatic = 0,
     kBitIsVolatile,
+    kBitIsQuickened,
     kBitMemAccessTypeBegin,
     kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3,  // 3 bits for raw type.
     kFieldInfoBitEnd = kBitMemAccessTypeEnd
   };
   static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
   static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+  static constexpr uint16_t kFlagIsQuickened = 1u << kBitIsQuickened;
   static constexpr uint16_t kMemAccessTypeMask = 7u;
   static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
                 "Invalid raw type mask");
@@ -117,8 +138,10 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
   // Construct an unresolved instance field lowering info.
-  explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
-      : MirFieldInfo(field_idx, kFlagIsVolatile, type),  // Without kFlagIsStatic.
+  explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
+      : MirFieldInfo(field_idx,
+                     kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u),
+                     type),  // Without kFlagIsStatic.
         field_offset_(0u) {
   }
 
@@ -134,6 +157,11 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
     return field_offset_;
   }
 
+  void CheckEquals(const MirIFieldLoweringInfo& other) const {
+    MirFieldInfo::CheckEquals(other);
+    CHECK_EQ(field_offset_.Uint32Value(), other.field_offset_.Uint32Value());
+  }
+
  private:
   enum {
     kBitFastGet = kFieldInfoBitEnd,
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 76b5e44..f354a49 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1673,12 +1673,6 @@ void MIRGraph::GetBlockName(BasicBlock* bb, char* name) {
   }
 }
 
-const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
-  // TODO: for inlining support, use current code unit.
-  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
-  return cu_->dex_file->GetShorty(method_id.proto_idx_);
-}
-
 const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
   const DexFile::MethodId& method_id =
       target_method.dex_file->GetMethodId(target_method.dex_method_index);
@@ -1724,8 +1718,7 @@ void MIRGraph::DumpMIRGraph() {
  * high-word loc for wide arguments. Also pull up any following
  * MOVE_RESULT and incorporate it into the invoke.
  */
-CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
-                                   bool is_range) {
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo), kArenaAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
@@ -1744,6 +1737,13 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
   info->opt_flags = mir->optimization_flags;
   info->type = type;
   info->is_range = is_range;
+  if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
+    const auto& method_info = GetMethodLoweringInfo(mir);
+    info->method_ref = method_info.GetTargetMethod();
+  } else {
+    info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
+                                       mir->dalvikInsn.vB);
+  }
   info->index = mir->dalvikInsn.vB;
   info->offset = mir->offset;
   info->mir = mir;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index e5abd3b..3dae5b4 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -504,6 +504,7 @@ struct CallInfo {
   int opt_flags;
   InvokeType type;
   uint32_t dex_idx;
+  MethodReference method_ref;
   uint32_t index;                 // Method idx for invokes, type idx for FilledNewArray.
   uintptr_t direct_code;
   uintptr_t direct_method;
@@ -687,7 +688,7 @@ class MIRGraph {
 
   void DoCacheMethodLoweringInfo();
 
-  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
+  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
     DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
     return method_lowering_infos_[mir->meta.method_lowering_info];
   }
@@ -1132,7 +1133,6 @@ class MIRGraph {
   std::string GetSSAName(int ssa_reg);
   std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
   void GetBlockName(BasicBlock* bb, char* name);
-  const char* GetShortyFromTargetIdx(int);
   const char* GetShortyFromMethodReference(const MethodReference& target_method);
   void DumpMIRGraph();
   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index b234950..3d3d979 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -33,51 +33,103 @@ void MirMethodLoweringInfo::Resolve(CompilerDriver* compiler_driver,
     DCHECK(method_infos != nullptr);
     DCHECK_NE(count, 0u);
     for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
-      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
+      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType(), it->IsQuickened());
+      unresolved.declaring_dex_file_ = it->declaring_dex_file_;
+      unresolved.vtable_idx_ = it->vtable_idx_;
       if (it->target_dex_file_ != nullptr) {
         unresolved.target_dex_file_ = it->target_dex_file_;
         unresolved.target_method_idx_ = it->target_method_idx_;
       }
-      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+      if (kIsDebugBuild) {
+        unresolved.CheckEquals(*it);
+      }
     }
   }
 
   // We're going to resolve methods and check access in a tight loop. It's better to hold
   // the lock and needed references once than re-acquiring them again and again.
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<3> hs(soa.Self());
+  StackHandleScope<4> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
   Handle<mirror::ClassLoader> class_loader(
       hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
   Handle<mirror::Class> referrer_class(hs.NewHandle(
       compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+  auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
   // Even if the referrer class is unresolved (i.e. we're compiling a method without class
   // definition) we still want to resolve methods and record all available info.
+  const DexFile* const dex_file = mUnit->GetDexFile();
+  const bool use_jit = Runtime::Current()->UseJit();
+  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
 
   for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+    // For quickened invokes, the dex method idx is actually the mir offset.
+    if (it->IsQuickened()) {
+      const auto* dequicken_ref = verified_method->GetDequickenIndex(it->method_idx_);
+      CHECK(dequicken_ref != nullptr);
+      it->target_dex_file_ = dequicken_ref->dex_file;
+      it->target_method_idx_ = dequicken_ref->index;
+    }
     // Remember devirtualized invoke target and set the called method to the default.
     MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
     MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
-    it->target_dex_file_ = mUnit->GetDexFile();
-    it->target_method_idx_ = it->MethodIndex();
-
     InvokeType invoke_type = it->GetInvokeType();
-    mirror::ArtMethod* resolved_method =
-        compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
-                                       invoke_type);
+    mirror::ArtMethod* resolved_method = nullptr;
+    if (!it->IsQuickened()) {
+      it->target_dex_file_ = dex_file;
+      it->target_method_idx_ = it->MethodIndex();
+      current_dex_cache.Assign(dex_cache.Get());
+      resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
+                                                       it->MethodIndex(), invoke_type);
+    } else {
+      // The method index is actually the dex PC in this case.
+      // Calculate the proper dex file and target method idx.
+      CHECK(use_jit);
+      CHECK_EQ(invoke_type, kVirtual);
+      // Don't devirt if we are in a different dex file since we can't have direct invokes in
+      // another dex file unless we always put a direct / patch pointer.
+      devirt_target = nullptr;
+      current_dex_cache.Assign(
+          Runtime::Current()->GetClassLinker()->FindDexCache(*it->target_dex_file_));
+      CHECK(current_dex_cache.Get() != nullptr);
+      DexCompilationUnit cu(
+          mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
+          *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
+          it->target_method_idx_, 0u /* access_flags not used */,
+          nullptr /* verified_method not used */);
+      resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
+                                                       it->target_method_idx_, invoke_type, false);
+      if (resolved_method != nullptr) {
+        // Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
+        // resolved to an interface method. If this is the case then change the invoke type to
+        // interface with the assumption that sharp_type will be kVirtual.
+        if (resolved_method->GetInvokeType() == kInterface) {
+          it->flags_ = (it->flags_ & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
+              (static_cast<uint16_t>(kInterface) << kBitInvokeTypeBegin);
+        }
+      }
+    }
     if (UNLIKELY(resolved_method == nullptr)) {
       continue;
     }
     compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
         &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
-    it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+    if (!it->IsQuickened()) {
+      // For quickened invoke virtuals we may have desharpened to an interface method which
+      // wont give us the right method index, in this case blindly dispatch or else we can't
+      // compile the method. Converting the invoke to interface dispatch doesn't work since we
+      // have no way to get the dex method index for quickened invoke virtuals in the interface
+      // trampolines.
+      it->vtable_idx_ =
+          compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+    }
 
-    MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
+    MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
     int fast_path_flags = compiler_driver->IsFastInvoke(
-        soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
-        &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
-    bool is_referrers_class = (referrer_class.Get() == resolved_method->GetDeclaringClass());
-    bool is_class_initialized =
+        soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
+        &invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
+    const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
+    const bool is_class_initialized =
         compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
     uint16_t other_flags = it->flags_ &
         ~(kFlagFastPath | kFlagClassIsInitialized | (kInvokeTypeMask << kBitSharpTypeBegin));
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 08fb103..e131c96 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -46,6 +46,9 @@ class MirMethodInfo {
   const DexFile* DeclaringDexFile() const {
     return declaring_dex_file_;
   }
+  void SetDeclaringDexFile(const DexFile* dex_file) {
+    declaring_dex_file_ = dex_file;
+  }
 
   uint16_t DeclaringClassIndex() const {
     return declaring_class_idx_;
@@ -98,11 +101,12 @@ class MirMethodLoweringInfo : public MirMethodInfo {
                       MirMethodLoweringInfo* method_infos, size_t count)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
-  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
+  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
       : MirMethodInfo(method_idx,
                       ((type == kStatic) ? kFlagIsStatic : 0u) |
                       (static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
-                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
+                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin) |
+                      (is_quickened ? kFlagQuickened : 0u)),
         direct_code_(0u),
         direct_method_(0u),
         target_dex_file_(nullptr),
@@ -131,6 +135,11 @@ class MirMethodLoweringInfo : public MirMethodInfo {
     return (flags_ & kFlagClassIsInitialized) != 0u;
   }
 
+  // Returns true iff the method invoke is INVOKE_VIRTUAL_QUICK or INVOKE_VIRTUAL_RANGE_QUICK.
+  bool IsQuickened() const {
+    return (flags_ & kFlagQuickened) != 0u;
+  }
+
   InvokeType GetInvokeType() const {
     return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
   }
@@ -146,6 +155,9 @@ class MirMethodLoweringInfo : public MirMethodInfo {
   uint16_t VTableIndex() const {
     return vtable_idx_;
   }
+  void SetVTableIndex(uint16_t index) {
+    vtable_idx_ = index;
+  }
 
   uintptr_t DirectCode() const {
     return direct_code_;
@@ -159,6 +171,20 @@ class MirMethodLoweringInfo : public MirMethodInfo {
     return stats_flags_;
   }
 
+  void CheckEquals(const MirMethodLoweringInfo& info) const {
+    CHECK_EQ(method_idx_, info.method_idx_);
+    CHECK_EQ(flags_, info.flags_);
+    CHECK_EQ(declaring_method_idx_, info.declaring_method_idx_);
+    CHECK_EQ(declaring_class_idx_, info.declaring_class_idx_);
+    CHECK_EQ(declaring_dex_file_, info.declaring_dex_file_);
+    CHECK_EQ(direct_code_, info.direct_code_);
+    CHECK_EQ(direct_method_, info.direct_method_);
+    CHECK_EQ(target_dex_file_, info.target_dex_file_);
+    CHECK_EQ(target_method_idx_, info.target_method_idx_);
+    CHECK_EQ(vtable_idx_, info.vtable_idx_);
+    CHECK_EQ(stats_flags_, info.stats_flags_);
+  }
+
  private:
   enum {
     kBitFastPath = kMethodInfoBitEnd,
@@ -168,12 +194,14 @@ class MirMethodLoweringInfo : public MirMethodInfo {
     kBitSharpTypeEnd = kBitSharpTypeBegin + 3,  // 3 bits for sharp type.
     kBitIsReferrersClass = kBitSharpTypeEnd,
     kBitClassIsInitialized,
+    kBitQuickened,
     kMethodLoweringInfoBitEnd
   };
   static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
   static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
   static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
   static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
+  static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;
   static constexpr uint16_t kInvokeTypeMask = 7u;
   static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
                 "assert invoke type bits failed");
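
The mir_method_info.h hunks above pack the new IsQuickened() state into the existing 16-bit flags_ word: bit positions are allocated by an enum so kBitQuickened slots in without renumbering, and a static_assert guards the budget. A cut-down, compilable C++ model of that idiom (names illustrative, not the ART class):

#include <cstdint>

class MethodFlags {
 public:
  explicit MethodFlags(bool is_quickened)
      : flags_(is_quickened ? kFlagQuickened : 0u) {}

  bool IsQuickened() const { return (flags_ & kFlagQuickened) != 0u; }

 private:
  enum {
    kBitFastPath = 0,
    kBitInvokeTypeBegin,
    kBitInvokeTypeEnd = kBitInvokeTypeBegin + 3,  // 3 bits for invoke type.
    kBitQuickened = kBitInvokeTypeEnd,            // New flag appended at the end.
    kBitEnd
  };
  static_assert(kBitEnd <= 16, "Too many flags for uint16_t");
  static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;

  uint16_t flags_;
};

int main() {
  return MethodFlags(true).IsQuickened() ? 0 : 1;
}
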
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index fd67d4e..93749e4 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1437,7 +1437,7 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
                                  nullptr /* code_item not used */, 0u /* class_def_idx not used */,
                                  target.dex_method_index, 0u /* access_flags not used */,
                                  nullptr /* verified_method not used */);
   DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
-  MirIFieldLoweringInfo inlined_field_info(field_idx, type);
+  MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
   MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
   DCHECK(inlined_field_info.IsResolved());
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index be05b80..9ce5ebb 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -254,7 +254,7 @@ class MirOptimizationTest : public testing::Test {
     cu_.mir_graph->method_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const MethodDef* def = &defs[i];
-      MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type);
+      MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type, false);
       if (def->declaring_dex_file != 0u) {
         method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         method_info.declaring_class_idx_ = def->declaring_class_idx;
@@ -407,7 +407,7 @@ class NullCheckEliminationTest : public MirOptimizationTest {
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_class_idx_ = def->declaring_class_idx;
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3d64833..8833da3 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -117,11 +117,11 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
                  "add", "!0C, !1C", 2, kFixupNone),
     ENCODING_MAP(kThumbAddPcRel, 0xa000,
                  kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH | NEEDS_FIXUP,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | NEEDS_FIXUP,
                  "add", "!0C, pc, #!1E", 2, kFixupLoad),
     ENCODING_MAP(kThumbAddSpRel, 0xa800,
                  kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP,
                  "add", "!0C, sp, #!2E", 2, kFixupNone),
     ENCODING_MAP(kThumbAddSpI7, 0xb000,
                  kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -182,7 +182,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
                  "blx", "!0C", 2, kFixupNone),
     ENCODING_MAP(kThumbBx, 0x4700,
                  kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+                 kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | IS_BRANCH,
                  "bx", "!0C", 2, kFixupNone),
     ENCODING_MAP(kThumbCmnRR, 0x42c0,
                  kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -693,7 +693,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
     ENCODING_MAP(kThumb2AdcRRR, 0xeb500000, /* setflags encoding */
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
                  kFmtShift, -1, -1,
-                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+                 IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES | USES_CCODES,
                  "adcs", "!0C, !1C, !2C!3H", 4, kFixupNone),
     ENCODING_MAP(kThumb2AndRRR, 0xea000000,
                  kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
@@ -835,15 +835,15 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
                  "it:!1b", "!0c", 2, kFixupNone),
     ENCODING_MAP(kThumb2Fmstat, 0xeef1fa10,
                  kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
+                 kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES | USES_CCODES,
                  "fmstat", "", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vcmpd, 0xeeb40b40,
                  kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
                  "vcmp.f64", "!0S, !1S", 4, kFixupNone),
     ENCODING_MAP(kThumb2Vcmps, 0xeeb40a40,
                  kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
-                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
                  "vcmp.f32", "!0s, !1s", 4, kFixupNone),
     ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
                  kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 3159886..2a4d27b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1079,6 +1079,7 @@ bool ArmMir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
 }
 
 LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
   return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
 }
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 806617b..aa5e5b4 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -111,7 +111,7 @@ namespace art {
 const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
     ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
                  "adc", "!0r, !1r, !2r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Add4RRdT), SF_VARIANTS(0x11000000),
                  kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -518,7 +518,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
                  "ror", "!0r, !1r, !2r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Sbc3rrr), SF_VARIANTS(0x5a000000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
-                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
                  "sbc", "!0r, !1r, !2r", kFixupNone),
     ENCODING_MAP(WIDE(kA64Sbfm4rrdd), SF_N_VARIANTS(0x13000000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7245853..8e3f4ef 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -427,7 +427,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
   InlineMethod intrinsic;
   {
     ReaderMutexLock mu(Thread::Current(), lock_);
-    auto it = inline_methods_.find(info->index);
+    auto it = inline_methods_.find(info->method_ref.dex_method_index);
     if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
       return false;
     }
@@ -718,7 +718,7 @@ bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMetho
     if (PrettyMethod(method_idx, *dex_file_) == "int java.lang.String.length()") {
       // TODO: String.length is both kIntrinsicIsEmptyOrLength and kInlineOpIGet.
     } else {
-      LOG(ERROR) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
+      LOG(WARNING) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
     }
     return false;
   }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3c9b7a3..afae89d 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -865,7 +865,12 @@ void Mir2Lir::HandleSlowPaths() {
 void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
                       RegLocation rl_dest, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+  if (kIsDebugBuild) {
+    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+        IGetMemAccessType(mir->dalvikInsn.opcode);
+    DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
+  }
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
   if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -939,7 +944,12 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type
 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_src, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+  if (kIsDebugBuild) {
+    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+        IPutMemAccessType(mir->dalvikInsn.opcode);
+    DCHECK_EQ(mem_access_type, field_info.MemAccessType());
+  }
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
   if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 8e3df7c..01f1d37 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -248,14 +248,16 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
   if (arg0.wide == 0) {
     LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
     if (arg1.wide == 0) {
+      // For Mips, when the 1st arg is integral, then remaining arg are passed in core reg.
       if (cu_->instruction_set == kMips) {
-        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
+        LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
       } else {
         LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
       }
     } else {
+      // For Mips, when the 1st arg is integral, then remaining arg are passed in core reg.
       if (cu_->instruction_set == kMips) {
-        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+        LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
       } else {
         LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
       }
@@ -263,9 +265,19 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
   } else {
     LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
     if (arg1.wide == 0) {
-      LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+      // For Mips, when the 1st arg is integral, then remaining arg are passed in core reg.
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
+      } else {
+        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+      }
     } else {
-      LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+      // For Mips, when the 1st arg is integral, then remaining arg are passed in core reg.
+      if (cu_->instruction_set == kMips) {
+        LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
+      } else {
+        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+      }
     }
   }
 }
@@ -863,11 +875,12 @@ RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
     // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+    res = GetReturn(
+        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
     DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -876,11 +889,12 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
   RegLocation res;
   if (info->result.location == kLocInvalid) {
     // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+    res = GetReturnWide(ShortyToRegClass(
+        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
     DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -1418,7 +1432,8 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
 
 void Mir2Lir::GenInvoke(CallInfo* info) {
   DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+  const DexFile* dex_file = info->method_ref.dex_file;
+  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
       ->GenIntrinsic(this, info)) {
     return;
   }
@@ -1428,7 +1443,7 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
 void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
   int call_state = 0;
   LIR* null_ck;
-  LIR** p_null_ck = NULL;
+  LIR** p_null_ck = nullptr;
   NextCallInsn next_call_insn;
   FlushAllRegs();  /* Everything to home location */
   // Explicit register usage
@@ -1440,6 +1455,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
   info->type = method_info.GetSharpType();
   bool fast_path = method_info.FastPath();
   bool skip_this;
+
   if (info->type == kInterface) {
     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
     skip_this = fast_path;
@@ -1469,7 +1485,8 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
   // Finish up any of the call sequence not interleaved in arg loading
   while (call_state >= 0) {
     call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
-                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
+                                method_info.DirectCode(), method_info.DirectMethod(),
+                                original_type);
   }
   LIR* call_insn = GenCallInsn(method_info);
   MarkSafepointPC(call_insn);
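
The CallRuntimeHelperRegLocationRegLocation() hunks above all apply the same MIPS rule, stated in the repeated comment: a second argument may travel in an FP register only if it is FP and the first argument was FP too; once an integral argument appears, later arguments go in core registers. A tiny, self-contained C++ sketch of that predicate (function name illustrative, not ART code):

#include <cassert>

// Models the (arg1.fp && arg0.fp) condition used in the patch above.
bool UseFpRegForSecondArg(bool arg1_is_fp, bool arg0_is_fp) {
  return arg1_is_fp && arg0_is_fp;
}

int main() {
  assert(UseFpRegForSecondArg(true, true));    // f(float, float): second arg in kFArg2.
  assert(!UseFpRegForSecondArg(true, false));  // f(int, float): float passed in a core reg.
  assert(!UseFpRegForSecondArg(false, true));  // Integral args always use core regs.
  return 0;
}
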
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec6edab..2d26922 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -17,6 +17,7 @@
 #include "codegen_mips.h"
 
 #include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/entrypoints_direct_mips.h"
 #include "base/logging.h"
 #include "dex/quick/mir_to_lir-inl.h"
 #include "dex/reg_storage_eq.h"
@@ -708,7 +709,18 @@ LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
 }
 
 LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
-  UNUSED(trampoline);  // The address of the trampoline is already loaded into r_tgt.
+  if (IsDirectEntrypoint(trampoline)) {
+    // Reserve argument space on stack (for $a0-$a3) for
+    // entrypoints that directly reference native implementations.
+    // This is not safe in general, as it violates the frame size
+    // of the Quick method, but it is used here only for calling
+    // native functions, outside of the runtime.
+    OpRegImm(kOpSub, rs_rSP, 16);
+    LIR* retVal = OpReg(op, r_tgt);
+    OpRegImm(kOpAdd, rs_rSP, 16);
+    return retVal;
+  }
   return OpReg(op, r_tgt);
 }
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index bd4936b..8348626 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -540,6 +540,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       GenMoveException(rl_dest);
       break;
 
+    case Instruction::RETURN_VOID_BARRIER:
     case Instruction::RETURN_VOID:
       if (((cu_->access_flags & kAccConstructor) != 0) &&
           cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -793,10 +794,12 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
       break;
 
+    case Instruction::IGET_OBJECT_QUICK:
     case Instruction::IGET_OBJECT:
       GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_WIDE_QUICK:
     case Instruction::IGET_WIDE:
       // kPrimLong and kPrimDouble share the same entrypoints.
       if (rl_dest.fp) {
@@ -806,6 +809,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       }
       break;
 
+    case Instruction::IGET_QUICK:
     case Instruction::IGET:
       if (rl_dest.fp) {
         GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
@@ -814,43 +818,54 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       }
       break;
 
+    case Instruction::IGET_CHAR_QUICK:
     case Instruction::IGET_CHAR:
       GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_SHORT_QUICK:
     case Instruction::IGET_SHORT:
       GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_BOOLEAN_QUICK:
     case Instruction::IGET_BOOLEAN:
       GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_BYTE_QUICK:
     case Instruction::IGET_BYTE:
       GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IPUT_WIDE_QUICK:
     case Instruction::IPUT_WIDE:
      GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
      break;
 
+    case Instruction::IPUT_OBJECT_QUICK:
     case Instruction::IPUT_OBJECT:
       GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_QUICK:
     case Instruction::IPUT:
       GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_BYTE_QUICK:
+    case Instruction::IPUT_BOOLEAN_QUICK:
     case Instruction::IPUT_BYTE:
     case Instruction::IPUT_BOOLEAN:
       GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_CHAR_QUICK:
     case Instruction::IPUT_CHAR:
       GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_SHORT_QUICK:
     case Instruction::IPUT_SHORT:
       GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
       break;
@@ -924,9 +939,12 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
       break;
 
+    case Instruction::INVOKE_VIRTUAL_QUICK:
     case Instruction::INVOKE_VIRTUAL:
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
       break;
+
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
     case Instruction::INVOKE_VIRTUAL_RANGE:
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
       break;
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 19c2a5a..fcf4716 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -542,6 +542,11 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
 void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
   // Disable optimizations according to instruction set.
   cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
+  if (Runtime::Current()->UseJit()) {
+    // Disable these optimizations for JIT until quickened byte codes are done being implemented.
+    // TODO: Find a cleaner way to do this.
+    cu.disable_opt |= 1u << kLocalValueNumbering;
+  }
 }
 
 void QuickCompiler::Init() {
+ cu.disable_opt |= 1u << kLocalValueNumbering; + } } void QuickCompiler::Init() { diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc index 4ff173d..150bdac 100644 --- a/compiler/dex/verification_results.cc +++ b/compiler/dex/verification_results.cc @@ -66,11 +66,16 @@ bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method // TODO: Investigate why are we doing the work again for this method and try to avoid it. LOG(WARNING) << "Method processed more than once: " << PrettyMethod(ref.dex_method_index, *ref.dex_file); - DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); - DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); + if (!Runtime::Current()->UseJit()) { + DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); + DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); + } DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size()); - delete it->second; - verified_methods_.erase(it); + // Delete the new verified method since there was already an existing one registered. It + // is unsafe to replace the existing one since the JIT may be using it to generate a + // native GC map. + delete verified_method; + return true; } verified_methods_.Put(ref, verified_method); DCHECK(verified_methods_.find(ref) != verified_methods_.end()); diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc index 21e965d..42d66be 100644 --- a/compiler/dex/verified_method.cc +++ b/compiler/dex/verified_method.cc @@ -24,6 +24,7 @@ #include "base/stl_util.h" #include "dex_file.h" #include "dex_instruction-inl.h" +#include "dex_instruction_utils.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" @@ -52,6 +53,11 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve if (method_verifier->HasVirtualOrInterfaceInvokes()) { verified_method->GenerateDevirtMap(method_verifier); } + + // Only need dequicken info for JIT so far. + if (Runtime::Current()->UseJit()) { + verified_method->GenerateDequickenMap(method_verifier); + } } if (method_verifier->HasCheckCasts()) { @@ -65,6 +71,12 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const { return (it != devirt_map_.end()) ? &it->second : nullptr; } +const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const { + DCHECK(Runtime::Current()->UseJit()); + auto it = dequicken_map_.find(dex_pc); + return (it != dequicken_map_.end()) ? 
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 21e965d..42d66be 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -24,6 +24,7 @@
 #include "base/stl_util.h"
 #include "dex_file.h"
 #include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
@@ -52,6 +53,11 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve
     if (method_verifier->HasVirtualOrInterfaceInvokes()) {
       verified_method->GenerateDevirtMap(method_verifier);
     }
+
+    // Only need dequicken info for JIT so far.
+    if (Runtime::Current()->UseJit()) {
+      verified_method->GenerateDequickenMap(method_verifier);
+    }
   }
 
   if (method_verifier->HasCheckCasts()) {
@@ -65,6 +71,12 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const {
   return (it != devirt_map_.end()) ? &it->second : nullptr;
 }
 
+const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
+  DCHECK(Runtime::Current()->UseJit());
+  auto it = dequicken_map_.find(dex_pc);
+  return (it != dequicken_map_.end()) ? &it->second : nullptr;
+}
+
 bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
   return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
 }
@@ -182,7 +194,7 @@ void VerifiedMethod::ComputeGcMapSizes(verifier::MethodVerifier* method_verifier
   *log2_max_gc_pc = i;
 }
 
-void VerifiedMethod::GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier) {
+void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
   if (method_verifier->HasFailures()) {
     return;
   }
@@ -196,13 +208,24 @@ void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerif
     if (is_virtual_quick || is_range_quick) {
       uint32_t dex_pc = inst->GetDexPc(insns);
       verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
-      mirror::ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line,
-                                                                         is_range_quick);
+      mirror::ArtMethod* method =
+          method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick);
       CHECK(method != nullptr);
       // The verifier must know what the type of the object was or else we would have gotten a
       // failure. Put the dex method index in the dequicken map since we need this to get number of
       // arguments in the compiler.
-      dequicken_map_.Put(dex_pc, method->ToMethodReference());
+      dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
+                                                  method->GetDexMethodIndex()));
+    } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
+      uint32_t dex_pc = inst->GetDexPc(insns);
+      verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
+      mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
+      CHECK(field != nullptr);
+      // The verifier must know what the type of the field was or else we would have gotten a
+      // failure. Put the dex field index in the dequicken map since we need this for lowering
+      // in the compiler.
+      // TODO: Putting a field index in a method reference is gross.
+      dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
+    }
   }
 }
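Note on the verified_method.cc hunks: GenerateDequickenMap walks the code item and, for each quickened invoke or field access, records the dex file together with the original method or field index, keyed by the instruction's dex pc; GetDequickenIndex is the read side. A simplified sketch of the map shape (std::map standing in for SafeMap, and a pared-down DexFileReference):

    #include <cstdint>
    #include <map>

    struct DexFile;  // opaque here

    // Simplified mirror of art::DexFileReference: a dex file plus an index that
    // is a method index for invoke-quick and a field index for iget/iput-quick.
    struct DexFileRef {
      const DexFile* dex_file;
      uint32_t index;
    };

    using DequickenMap = std::map<uint32_t, DexFileRef>;  // dex pc -> reference

    // Lookup mirroring VerifiedMethod::GetDequickenIndex: null when the pc does
    // not belong to a quickened instruction.
    const DexFileRef* GetDequickenIndex(const DequickenMap& m, uint32_t dex_pc) {
      auto it = m.find(dex_pc);
      return (it != m.end()) ? &it->second : nullptr;
    }

Keeping a single value type for both cases is what the TODO in the hunk calls out: a field index rides in a structure that reads like a method reference.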
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index fe9dfd1..748bdcb 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include "base/mutex.h"
+#include "dex_file.h"
 #include "method_reference.h"
 #include "safe_map.h"
 
@@ -39,6 +40,9 @@ class VerifiedMethod {
   // Devirtualization map type maps dex offset to concrete method reference.
   typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
 
+  // Dequicken map type maps dex offset to field / method idx.
+  typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
+
   static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   ~VerifiedMethod() = default;
@@ -58,6 +62,10 @@ class VerifiedMethod {
   // Returns the devirtualization target method, or nullptr if none.
   const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
 
+  // Returns the dequickened field / method index for a quick invoke / field get. Returns null
+  // if there is no entry for that dex pc.
+  const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
+
   // Returns true if the cast can statically be verified to be redundant
   // by using the check-cast elision peephole optimization in the verifier.
   bool IsSafeCast(uint32_t pc) const;
@@ -86,7 +94,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Generate dequickening map into dequicken_map_.
-  void GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier)
+  void GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Generate safe case set into safe_cast_set_.
@@ -95,9 +103,9 @@
   std::vector<uint8_t> dex_gc_map_;
   DevirtualizationMap devirt_map_;
 
-  // Dequicken map is required for having the compiler compiled quickened invokes. The quicken map
-  // enables us to get the dex method index so that we can get the required argument count.
-  DevirtualizationMap dequicken_map_;
+  // Dequicken map is required for compiling quickened byte codes. It maps from the dex PC to
+  // the dex method index or dex field index based on the instruction.
+  DequickenMap dequicken_map_;
 
   SafeCastSet safe_cast_set_;
 };
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index b620969..2b78e38 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -19,6 +19,7 @@
 #include "compiler_ir.h"
 #include "dex/dataflow_iterator-inl.h"
 #include "dex_flags.h"
+#include "driver/dex_compilation_unit.h"
 
 namespace art {
 
@@ -259,8 +260,8 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
       if ((flags & Instruction::kInvoke) &&
           (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
         DCHECK_EQ(next, 0);
-        int target_idx = mir->dalvikInsn.vB;
-        const char* shorty = GetShortyFromTargetIdx(target_idx);
+        const auto& lowering_info = GetMethodLoweringInfo(mir);
+        const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
         // Handle result type if floating point
         if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
           MIR* move_result_mir = FindMoveResult(bb, mir);
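Note on the vreg_analysis.cc hunk: the shorty is now resolved through the cached method lowering info instead of treating vB as a dex method index, presumably because vB no longer holds one once an invoke is quickened (invoke-virtual-quick carries a vtable offset there). The float/double handling itself only needs the shorty's first character, since a shorty lists the return type first; a small self-contained sketch:

    #include <cassert>

    // Returns true if the invoke's result is a floating point value, judged from
    // the shorty's "F"/"D" return type. Minimal sketch; in the compiler the
    // shorty comes from GetShortyFromMethodReference as in the hunk above.
    inline bool InvokeReturnsFloatingPoint(const char* shorty) {
      assert(shorty != nullptr && shorty[0] != '\0');
      return shorty[0] == 'F' || shorty[0] == 'D';
    }

    // Example: "DLL" describes a method returning double with two reference
    // arguments, so InvokeReturnsFloatingPoint("DLL") is true.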