Diffstat (limited to 'compiler/dex/mir_graph.h')
-rw-r--r-- | compiler/dex/mir_graph.h | 67
1 files changed, 38 insertions, 29 deletions
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 0db54bf..d6dc566 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,7 +39,6 @@ class DexFileMethodInliner;
 class GlobalValueNumbering;
 class GvnDeadCodeElimination;
 class PassManager;
-class TypeInference;
 
 // Forward declaration.
 class MIRGraph;
@@ -65,7 +64,6 @@ enum DataFlowAttributePos {
   kNullTransferSrc0,     // Object copy src[0] -> dst.
   kNullTransferSrcN,     // Phi null check state transfer.
   kRangeCheckC,          // Range check of C.
-  kCheckCastA,           // Check cast of A.
   kFPA,
   kFPB,
   kFPC,
@@ -75,7 +73,6 @@ enum DataFlowAttributePos {
   kRefA,
   kRefB,
   kRefC,
-  kSameTypeAB,           // A and B have the same type but it can be core/ref/fp (IF_cc).
   kUsesMethodStar,       // Implicit use of Method*.
   kUsesIField,           // Accesses an instance field (IGET/IPUT).
   kUsesSField,           // Accesses a static field (SGET/SPUT).
@@ -104,7 +101,6 @@ enum DataFlowAttributePos {
 #define DF_NULL_TRANSFER_0      (UINT64_C(1) << kNullTransferSrc0)
 #define DF_NULL_TRANSFER_N      (UINT64_C(1) << kNullTransferSrcN)
 #define DF_RANGE_CHK_C          (UINT64_C(1) << kRangeCheckC)
-#define DF_CHK_CAST             (UINT64_C(1) << kCheckCastA)
 #define DF_FP_A                 (UINT64_C(1) << kFPA)
 #define DF_FP_B                 (UINT64_C(1) << kFPB)
 #define DF_FP_C                 (UINT64_C(1) << kFPC)
@@ -114,7 +110,6 @@ enum DataFlowAttributePos {
 #define DF_REF_A                (UINT64_C(1) << kRefA)
 #define DF_REF_B                (UINT64_C(1) << kRefB)
 #define DF_REF_C                (UINT64_C(1) << kRefC)
-#define DF_SAME_TYPE_AB         (UINT64_C(1) << kSameTypeAB)
 #define DF_UMS                  (UINT64_C(1) << kUsesMethodStar)
 #define DF_IFIELD               (UINT64_C(1) << kUsesIField)
 #define DF_SFIELD               (UINT64_C(1) << kUsesSField)
@@ -222,11 +217,13 @@ struct BasicBlockDataFlow {
  */
 struct SSARepresentation {
   int32_t* uses;
+  bool* fp_use;
   int32_t* defs;
-  uint16_t num_uses_allocated;
-  uint16_t num_defs_allocated;
-  uint16_t num_uses;
-  uint16_t num_defs;
+  bool* fp_def;
+  int16_t num_uses_allocated;
+  int16_t num_defs_allocated;
+  int16_t num_uses;
+  int16_t num_defs;
 
   static uint32_t GetStartUseIndex(Instruction::Code opcode);
 };
@@ -337,8 +334,7 @@ class MIR : public ArenaObject<kArenaAllocMIR> {
     // SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
     // the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
     uint32_t sfield_lowering_info;
-    // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
-    // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
+    // INVOKE data index, points to MIRGraph::method_lowering_infos_.
     uint32_t method_lowering_info;
   } meta;
 
@@ -651,10 +647,6 @@ class MIRGraph {
    */
   void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
 
-  bool HasCheckCast() const {
-    return (merged_df_flags_ & DF_CHK_CAST) != 0u;
-  }
-
   bool HasFieldAccess() const {
     return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
   }
@@ -699,16 +691,8 @@ class MIRGraph {
   void DoCacheMethodLoweringInfo();
 
   const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
-    return GetMethodLoweringInfo(mir->meta.method_lowering_info);
-  }
-
-  const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
-    DCHECK_LT(lowering_info, method_lowering_infos_.size());
-    return method_lowering_infos_[lowering_info];
-  }
-
-  size_t GetMethodLoweringInfoCount() const {
-    return method_lowering_infos_.size();
+    DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
+    return method_lowering_infos_[mir->meta.method_lowering_info];
   }
 
   void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -1089,9 +1073,7 @@ class MIRGraph {
   bool EliminateNullChecksGate();
   bool EliminateNullChecks(BasicBlock* bb);
   void EliminateNullChecksEnd();
-  void InferTypesStart();
   bool InferTypes(BasicBlock* bb);
-  void InferTypesEnd();
   bool EliminateClassInitChecksGate();
   bool EliminateClassInitChecks(BasicBlock* bb);
   void EliminateClassInitChecksEnd();
@@ -1118,6 +1100,34 @@ class MIRGraph {
     return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
   }
 
+  /*
+   * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
+   * we have to do some work to figure out the sreg type. For some operations it is
+   * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
+   * may never know the "real" type.
+   *
+   * We perform the type inference operation by using an iterative walk over
+   * the graph, propagating types "defined" by typed opcodes to uses and defs in
+   * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
+   * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
+   * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
+   * tells whether our guess of the type is based on a previously typed definition.
+   * If so, the defined type takes precedence. Note that it's possible to have the same sreg
+   * show multiple defined types because dx treats constants as untyped bit patterns.
+   * The return value of the Setxx() helpers says whether or not the Setxx() action changed
+   * the current guess, and is used to know when to terminate the iterative walk.
+   */
+  bool SetFp(int index, bool is_fp);
+  bool SetFp(int index);
+  bool SetCore(int index, bool is_core);
+  bool SetCore(int index);
+  bool SetRef(int index, bool is_ref);
+  bool SetRef(int index);
+  bool SetWide(int index, bool is_wide);
+  bool SetWide(int index);
+  bool SetHigh(int index, bool is_high);
+  bool SetHigh(int index);
+
   bool PuntToInterpreter() {
     return punt_to_interpreter_;
   }
@@ -1242,6 +1252,7 @@ class MIRGraph {
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
 
   void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
+  bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
 
  protected:
   int FindCommonParent(int block1, int block2);
@@ -1388,7 +1399,6 @@
       ArenaBitVector* work_live_vregs;
       ArenaBitVector** def_block_matrix;  // num_vregs x num_blocks_.
       ArenaBitVector** phi_node_blocks;   // num_vregs x num_blocks_.
-      TypeInference* ti;
     } ssa;
     // Global value numbering.
     struct {
@@ -1448,7 +1458,6 @@
   friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
   friend class TopologicalSortOrderTest;
-  friend class TypeInferenceTest;
   friend class QuickCFITest;
 };
 
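The comment block restored above describes the iterative, per-sreg propagation scheme behind the SetFp()/SetCore()/SetRef()/SetWide()/SetHigh() helpers and InferTypeAndSize(): typed opcodes pin a "defined" type, untyped opcodes (MOVE, PHI, CONST) forward the current guess, and the walk repeats until no Set*() call reports a change. Below is a minimal, self-contained sketch of that fixed-point pattern for a single property (fp-ness). It is not ART code; the names SRegType, TypeLattice, MiniMir and InferTypesOnce are hypothetical simplifications introduced here purely for illustration.

// Minimal sketch (not the actual ART implementation) of the fixed-point type
// propagation implied by the restored Set*() helpers. Only the fp property is
// modeled; SRegType, TypeLattice, MiniMir and InferTypesOnce are hypothetical.
#include <cstddef>
#include <vector>

namespace sketch {

// Per-sreg guess, mirroring the bool-per-property style of SetFp()/SetCore()/SetRef().
struct SRegType {
  bool fp = false;          // Current guess: looks like a floating-point value.
  bool defined_fp = false;  // Guess came from a typed definition and takes precedence.
};

class TypeLattice {
 public:
  explicit TypeLattice(std::size_t num_sregs) : types_(num_sregs) {}

  // Form used for typed opcodes (e.g. ADD_FLOAT): the type is known ("defined").
  // Returns true if this call changed the current guess, so the caller knows the
  // iterative walk has not reached a fixed point yet.
  bool SetFp(int index) {
    bool change = !types_[index].fp || !types_[index].defined_fp;
    types_[index].fp = true;
    types_[index].defined_fp = true;
    return change;
  }

  // Form used to propagate a guess through untyped opcodes (MOVE, PHI, CONST).
  // A previously defined type always wins over a propagated guess.
  bool SetFp(int index, bool is_fp) {
    if (!is_fp || types_[index].defined_fp) {
      return false;  // Nothing new to record.
    }
    bool change = !types_[index].fp;
    types_[index].fp = true;
    return change;
  }

  bool IsFp(int index) const { return types_[index].fp; }

 private:
  std::vector<SRegType> types_;
};

// Stand-in for an SSA instruction: either a typed fp op (ADD_FLOAT-like) or an
// untyped copy (MOVE/PHI-like) that merely forwards whatever its operands are.
struct MiniMir {
  enum class Op { kFpArith, kUntypedCopy } op;
  std::vector<int> uses;  // SSA registers consumed.
  std::vector<int> defs;  // SSA registers produced.
};

// One pass over the instructions; returns whether any guess changed, mirroring
// the bool result of MIRGraph::InferTypes(BasicBlock* bb).
bool InferTypesOnce(const std::vector<MiniMir>& mirs, TypeLattice* lattice) {
  bool changed = false;
  for (const MiniMir& mir : mirs) {
    if (mir.op == MiniMir::Op::kFpArith) {
      // Typed opcode: all operands are fp by definition of the opcode.
      for (int s : mir.uses) changed |= lattice->SetFp(s);
      for (int s : mir.defs) changed |= lattice->SetFp(s);
    } else {
      // Untyped opcode: if any operand already looks fp, spread that guess to
      // the rest (in both directions, since MOVE/PHI transfer types both ways).
      bool any_fp = false;
      for (int s : mir.uses) any_fp = any_fp || lattice->IsFp(s);
      for (int s : mir.defs) any_fp = any_fp || lattice->IsFp(s);
      for (int s : mir.uses) changed |= lattice->SetFp(s, any_fp);
      for (int s : mir.defs) changed |= lattice->SetFp(s, any_fp);
    }
  }
  return changed;
}

// Driver: repeat the walk until no Set*() call reports a change (fixed point).
void InferTypes(const std::vector<MiniMir>& mirs, TypeLattice* lattice) {
  while (InferTypesOnce(mirs, lattice)) {
  }
}

}  // namespace sketch

In the restored MIRGraph API the same convention drives termination: the per-block InferTypes(BasicBlock* bb) pass is rerun for as long as any Set*() call returns true.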