author     Ian Rogers <irogers@google.com>    2014-09-29 18:31:02 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>    2014-09-29 18:31:02 +0000
commit     1edf638010c4b15b87f865d180c3b95026827e9a (patch)
tree       bc752c9dd1f36cf0421e56c267e772e7c7ebf43f /compiler/dex
parent     f731a78809ac61a9085781370f0f38ef88305276 (diff)
parent     750359753444498d509a756fa9a042e9f3c432df (diff)
Merge "ART: Deprecate CompilationUnit's code_item"
Diffstat (limited to 'compiler/dex')
-rw-r--r--  compiler/dex/compiler_ir.cc                   1
-rw-r--r--  compiler/dex/compiler_ir.h                    1
-rw-r--r--  compiler/dex/global_value_numbering_test.cc   2
-rw-r--r--  compiler/dex/mir_analysis.cc                 14
-rw-r--r--  compiler/dex/mir_dataflow.cc                 50
-rw-r--r--  compiler/dex/mir_graph.cc                    36
-rw-r--r--  compiler/dex/mir_graph.h                     10
-rw-r--r--  compiler/dex/mir_graph_test.cc                2
-rw-r--r--  compiler/dex/mir_optimization.cc              2
-rw-r--r--  compiler/dex/mir_optimization_test.cc         2
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc           2
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc             2
-rw-r--r--  compiler/dex/quick/codegen_util.cc            2
-rw-r--r--  compiler/dex/quick/ralloc_util.cc             3
14 files changed, 53 insertions, 76 deletions
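
In short, the merge drops the code_item field from CompilationUnit and routes code-item and size queries through MIRGraph. A minimal sketch of the call-site pattern, assuming the ART accessors touched below (MIRGraph::GetNumDalvikInsns() and DexCompilationUnit::GetCodeItem()); it is illustrative only and not compilable on its own:

    // Old pattern: read straight off the CompilationUnit's cached code item.
    //   size_t insns_size = cu_->code_item->insns_size_in_code_units_;
    // New pattern: the graph reports the cumulative size over all methods it holds.
    size_t insns_size = mir_graph_->GetNumDalvikInsns();

    // Backends that still need the raw code item (e.g. for frame layout) fetch it
    // from the per-method DexCompilationUnit instead of from cu_->code_item.
    const DexFile::CodeItem* code_item =
        mir_graph_->GetCurrentDexCompilationUnit()->GetCodeItem();

The hunks below apply exactly this substitution at each former cu_->code_item use.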
diff --git a/compiler/dex/compiler_ir.cc b/compiler/dex/compiler_ir.cc
index ce48eb2..909c995 100644
--- a/compiler/dex/compiler_ir.cc
+++ b/compiler/dex/compiler_ir.cc
@@ -29,7 +29,6 @@ CompilationUnit::CompilationUnit(ArenaPool* pool)
class_loader(nullptr),
class_def_idx(0),
method_idx(0),
- code_item(nullptr),
access_flags(0),
invoke_type(kDirect),
shorty(nullptr),
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index c4e43fd..37e3a7a 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -58,7 +58,6 @@ struct CompilationUnit {
jobject class_loader; // compiling method's class loader.
uint16_t class_def_idx; // compiling method's defining class definition index.
uint32_t method_idx; // compiling method's index into method_ids of DexFile.
- const DexFile::CodeItem* code_item; // compiling method's DexFile code_item.
uint32_t access_flags; // compiling method's access flags.
InvokeType invoke_type; // compiling method's invocation type.
const char* shorty; // compiling method's shorty.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index c808234..1d9920d 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -260,7 +260,7 @@ class GlobalValueNumberingTest : public testing::Test {
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
code_item->insns_size_in_code_units_ = 2u * count;
- cu_.mir_graph->current_code_item_ = cu_.code_item = code_item;
+ cu_.mir_graph->current_code_item_ = code_item;
}
template <size_t count>
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 6ef3cea..ee48796 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1202,7 +1202,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
void MIRGraph::DoCacheFieldLoweringInfo() {
// All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
- const uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
+ const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
ScopedArenaAllocator allocator(&cu_->arena_stack);
uint16_t* field_idxs =
reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));
@@ -1218,12 +1218,11 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode >= Instruction::IGET &&
mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
- const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
// Get field index and try to find it among existing indexes. If found, it's usually among
// the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
// is a linear search, it actually performs much better than map based approach.
if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
- uint16_t field_idx = insn->VRegC_22c();
+ uint16_t field_idx = mir->dalvikInsn.vC;
size_t i = ifield_pos;
while (i != 0u && field_idxs[i - 1] != field_idx) {
--i;
@@ -1235,7 +1234,7 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
field_idxs[ifield_pos++] = field_idx;
}
} else {
- uint16_t field_idx = insn->VRegB_21c();
+ uint16_t field_idx = mir->dalvikInsn.vB;
size_t i = sfield_pos;
while (i != max_refs && field_idxs[i] != field_idx) {
++i;
@@ -1315,7 +1314,7 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
ScopedArenaAllocator allocator(&cu_->arena_stack);
// All INVOKE instructions take 3 code units and there must also be a RETURN.
- uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;
+ uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
// Map invoke key (see MapEntry) to lowering info index and vice versa.
// The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
@@ -1336,14 +1335,13 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
// Decode target method index and invoke type.
- const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
uint16_t target_method_idx;
uint16_t invoke_type_idx;
if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
- target_method_idx = insn->VRegB_35c();
+ target_method_idx = mir->dalvikInsn.vB;
invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
} else {
- target_method_idx = insn->VRegB_3rc();
+ target_method_idx = mir->dalvikInsn.vB;
invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index fd8546e..e71c806 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1335,56 +1335,6 @@ void MIRGraph::CompilerInitializeSSAConversion() {
}
/*
- * This function will make a best guess at whether the invoke will
- * end up using Method*. It isn't critical to get it exactly right,
- * and attempting to do would involve more complexity than it's
- * worth.
- */
-bool MIRGraph::InvokeUsesMethodStar(MIR* mir) {
- InvokeType type;
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- switch (opcode) {
- case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_STATIC_RANGE:
- type = kStatic;
- break;
- case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_DIRECT_RANGE:
- type = kDirect;
- break;
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- type = kVirtual;
- break;
- case Instruction::INVOKE_INTERFACE:
- case Instruction::INVOKE_INTERFACE_RANGE:
- return false;
- case Instruction::INVOKE_SUPER_RANGE:
- case Instruction::INVOKE_SUPER:
- type = kSuper;
- break;
- default:
- LOG(WARNING) << "Unexpected invoke op: " << opcode;
- return false;
- }
- DexCompilationUnit m_unit(cu_);
- MethodReference target_method(cu_->dex_file, mir->dalvikInsn.vB);
- int vtable_idx;
- uintptr_t direct_code;
- uintptr_t direct_method;
- uint32_t current_offset = static_cast<uint32_t>(current_offset_);
- bool fast_path =
- cu_->compiler_driver->ComputeInvokeInfo(&m_unit, current_offset,
- false, true,
- &type, &target_method,
- &vtable_idx,
- &direct_code, &direct_method) &&
- !(cu_->enable_debug & (1 << kDebugSlowInvokePath));
- return (((type == kDirect) || (type == kStatic)) &&
- fast_path && ((direct_code == 0) || (direct_method == 0)));
-}
-
-/*
* Count uses, weighting by loop nesting depth. This code only
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7c0a996..bd7e4f7 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -708,7 +708,6 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
cu_->access_flags = access_flags;
cu_->invoke_type = invoke_type;
cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- cu_->code_item = current_code_item_;
} else {
UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
/*
@@ -1404,9 +1403,11 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
opcode = insn.opcode;
} else if (opcode == kMirOpNop) {
str.append("[");
- // Recover original opcode.
- insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
- opcode = insn.opcode;
+ if (mir->offset < current_code_item_->insns_size_in_code_units_) {
+ // Recover original opcode.
+ insn.opcode = Instruction::At(current_code_item_->insns_ + mir->offset)->Opcode();
+ opcode = insn.opcode;
+ }
nop = true;
}
int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
@@ -1700,6 +1701,33 @@ void MIRGraph::SSATransformationEnd() {
temp_scoped_alloc_.reset();
}
+size_t MIRGraph::GetNumDalvikInsns() const {
+ size_t cumulative_size = 0u;
+ bool counted_current_item = false;
+ const uint8_t size_for_null_code_item = 2u;
+
+ for (auto it : m_units_) {
+ const DexFile::CodeItem* code_item = it->GetCodeItem();
+ // Even if the code item is null, we still count non-zero value so that
+ // each m_unit is counted as having impact.
+ cumulative_size += (code_item == nullptr ?
+ size_for_null_code_item : code_item->insns_size_in_code_units_);
+ if (code_item == current_code_item_) {
+ counted_current_item = true;
+ }
+ }
+
+ // If the current code item was not counted yet, count it now.
+ // This can happen for example in unit tests where some fields like m_units_
+ // are not initialized.
+ if (counted_current_item == false) {
+ cumulative_size += (current_code_item_ == nullptr ?
+ size_for_null_code_item : current_code_item_->insns_size_in_code_units_);
+ }
+
+ return cumulative_size;
+}
+
static BasicBlock* SelectTopologicalSortOrderFallBack(
MIRGraph* mir_graph, const ArenaBitVector* current_loop,
const ScopedArenaVector<size_t>* visited_cnt_values, ScopedArenaAllocator* allocator,
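
For reference, the new GetNumDalvikInsns() above is an accumulation with a two-code-unit placeholder for null code items, plus a fallback for the case where current_code_item_ is not among m_units_ (as happens in the unit tests). A hedged standalone rework of that logic over plain sizes; the types are simplified stand-ins, not ART's:

    #include <cstddef>
    #include <vector>

    // Each entry models one m_unit's insns_size_in_code_units_; zero models a
    // null code item, which is still charged two code units so that every
    // m_unit counts as having impact.
    size_t CumulativeCodeUnits(const std::vector<size_t>& unit_sizes,
                               size_t current_size, bool current_in_units) {
      constexpr size_t kSizeForNullCodeItem = 2u;
      size_t total = 0u;
      for (size_t s : unit_sizes) {
        total += (s == 0u) ? kSizeForNullCodeItem : s;
      }
      if (!current_in_units) {
        // Mirrors the extra count for current_code_item_ when it is not in m_units_.
        total += (current_size == 0u) ? kSizeForNullCodeItem : current_size;
      }
      return total;
    }

    // Example: units of 10 and 20 code units plus a current method of 6 code
    // units that is not in the list give 10 + 20 + 6 = 36.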
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 012b435..f53ec89 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -579,9 +579,12 @@ class MIRGraph {
return num_blocks_;
}
- size_t GetNumDalvikInsns() const {
- return cu_->code_item->insns_size_in_code_units_;
- }
+ /**
+ * @brief Provides the total size in code units of all instructions in MIRGraph.
+ * @details Includes the sizes of all methods in compilation unit.
+ * @return Returns the cumulative sum of all insn sizes (in code units).
+ */
+ size_t GetNumDalvikInsns() const;
ArenaBitVector* GetTryBlockAddr() const {
return try_block_addr_;
@@ -1187,7 +1190,6 @@ class MIRGraph {
ArenaBitVector* live_in_v,
const MIR::DecodedInstruction& d_insn);
bool DoSSAConversion(BasicBlock* bb);
- bool InvokeUsesMethodStar(MIR* mir);
int ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction);
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
diff --git a/compiler/dex/mir_graph_test.cc b/compiler/dex/mir_graph_test.cc
index e34759b..a96cd84 100644
--- a/compiler/dex/mir_graph_test.cc
+++ b/compiler/dex/mir_graph_test.cc
@@ -98,7 +98,7 @@ class TopologicalSortOrderTest : public testing::Test {
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(cu_.arena.Alloc(sizeof(DexFile::CodeItem),
kArenaAllocMisc));
- cu_.mir_graph->current_code_item_ = cu_.code_item = code_item;
+ cu_.mir_graph->current_code_item_ = code_item;
}
template <size_t count>
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 41f63c1..dac71f6 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1066,7 +1066,7 @@ bool MIRGraph::EliminateClassInitChecksGate() {
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
// Each insn we use here has at least 2 code units, offset/2 will be a unique index.
- const size_t end = (cu_->code_item->insns_size_in_code_units_ + 1u) / 2u;
+ const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
temp_insn_data_ = static_cast<uint16_t*>(
temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 6272332..55e547e 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -171,7 +171,7 @@ class ClassInitCheckEliminationTest : public testing::Test {
cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
memset(code_item_, 0, sizeof(DexFile::CodeItem));
code_item_->insns_size_in_code_units_ = 2u * count;
- cu_.mir_graph->current_code_item_ = cu_.code_item = code_item_;
+ cu_.mir_graph->current_code_item_ = code_item_;
}
template <size_t count>
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 18155d1..ba255e0 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1963,7 +1963,7 @@ void MirConverter::MethodMIR2Bitcode() {
if (::llvm::verifyFunction(*func_, ::llvm::PrintMessageAction)) {
LOG(INFO) << "Bitcode verification FAILED for "
<< PrettyMethod(cu_->method_idx, *cu_->dex_file)
- << " of size " << cu_->code_item->insns_size_in_code_units_;
+ << " of size " << mir_graph_->GetNumDalvikInsns();
cu_->enable_debug |= (1 << kDebugDumpBitcodeFile);
}
}
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 0de2a44..1a4b23e 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -377,7 +377,7 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
- skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
+ skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 3135892..f305017 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -319,7 +319,7 @@ void Mir2Lir::CodegenDump() {
LOG(INFO) << "Dumping LIR insns for "
<< PrettyMethod(cu_->method_idx, *cu_->dex_file);
LIR* lir_insn;
- int insns_size = cu_->code_item->insns_size_in_code_units_;
+ int insns_size = mir_graph_->GetNumDalvikInsns();
LOG(INFO) << "Regs (excluding ins) : " << mir_graph_->GetNumOfLocalCodeVRs();
LOG(INFO) << "Ins : " << mir_graph_->GetNumOfInVRs();
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index b171c78..6305b22 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1323,7 +1323,8 @@ void Mir2Lir::DoPromotion() {
/* Returns sp-relative offset in bytes for a VReg */
int Mir2Lir::VRegOffset(int v_reg) {
- return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_,
+ const DexFile::CodeItem* code_item = mir_graph_->GetCurrentDexCompilationUnit()->GetCodeItem();
+ return StackVisitor::GetVRegOffset(code_item, core_spill_mask_,
fp_spill_mask_, frame_size_, v_reg,
cu_->instruction_set);
}
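
With cu_->code_item gone, the quick backend's VRegOffset() resolves the code item through the per-method DexCompilationUnit, as in the hunk above. A small hedged sketch of that lookup (ResolveCurrentCodeItem is a hypothetical helper name; the accessors are the ones the patch uses), assuming the ART headers:

    // Hypothetical convenience wrapper around the accessors used in
    // Mir2Lir::VRegOffset() after this change.
    static const DexFile::CodeItem* ResolveCurrentCodeItem(MIRGraph* mir_graph) {
      return mir_graph->GetCurrentDexCompilationUnit()->GetCodeItem();
    }

Routing the lookup through the DexCompilationUnit keeps per-method state out of CompilationUnit, which the m_units_-based accounting above also points toward.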