diff options
author | buzbee <buzbee@google.com> | 2013-10-11 10:49:56 -0700 |
---|---|---|
committer | buzbee <buzbee@google.com> | 2013-10-11 10:56:31 -0700 |
commit | 409fe94ad529d9334587be80b9f6a3d166805508 (patch) | |
tree | 1bd570273fcdf0f99d3ef273ad1b5a4b47acd3d6 | |
parent | 29f86e5d7631e58fbe6fbd7888cfb2b0851417b4 (diff) | |
download | art-409fe94ad529d9334587be80b9f6a3d166805508.zip art-409fe94ad529d9334587be80b9f6a3d166805508.tar.gz art-409fe94ad529d9334587be80b9f6a3d166805508.tar.bz2 |
Quick assembler fix
This CL re-instates the select pattern optimization disabled by
CL 374310, and fixes the underlying problem: improper handling of
the kPseudoBarrier LIR opcode. The bug was introduced in the
recent assembler restructuring. In short, LIR pseudo opcodes (which
have values < 0), should always have size 0 - and thus cause no
bits to be emitted during assembly. In this case, bad logic caused
us to set the size of a kPseudoBarrier opcode via lookup through the
EncodingMap.
Because all pseudo ops are < 0, this meant we did an array underflow
load, picking up whatever garbage was located before the EncodingMap.
This explains why this error showed up recently - we'd previously just
gotten a lucky layout.
This CL corrects the faulty logic, and adds DCHECKs to uses of
the EncodingMap to ensure that we don't try to access w/ a
pseudo op. Additionally, the existing is_pseudo_op() macro is
replaced with IsPseudoLirOp(), named similarly to the existing
IsPseudoMirOp().
Change-Id: I46761a0275a923d85b545664cadf052e1ab120dc
-rw-r--r-- | compiler/dex/mir_optimization.cc | 3 | ||||
-rw-r--r-- | compiler/dex/quick/arm/assemble_arm.cc | 5 | ||||
-rw-r--r-- | compiler/dex/quick/arm/target_arm.cc | 3 | ||||
-rw-r--r-- | compiler/dex/quick/arm/utility_arm.cc | 4 | ||||
-rw-r--r-- | compiler/dex/quick/local_optimizations.cc | 12 | ||||
-rw-r--r-- | compiler/dex/quick/mips/assemble_mips.cc | 3 | ||||
-rw-r--r-- | compiler/dex/quick/mips/target_mips.cc | 3 | ||||
-rw-r--r-- | compiler/dex/quick/mir_to_lir-inl.h | 18 | ||||
-rw-r--r-- | compiler/dex/quick/mir_to_lir.h | 5 | ||||
-rw-r--r-- | compiler/dex/quick/x86/assemble_x86.cc | 5 | ||||
-rw-r--r-- | compiler/dex/quick/x86/target_x86.cc | 3 |
11 files changed, 41 insertions, 23 deletions
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 0b4f041..05e428e 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -325,8 +325,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) { // Is this the select pattern? // TODO: flesh out support for Mips and X86. NOTE: llvm's select op doesn't quite work here. // TUNING: expand to support IF_xx compare & branches - if (false && - !(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) && + if (!(cu_->compiler_backend == kPortable) && (cu_->instruction_set == kThumb2) && ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) || (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) { BasicBlock* ft = bb->fall_through; diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index dac3a21..3c646c4 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1021,7 +1021,7 @@ void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) { void ArmMir2Lir::EncodeLIR(LIR* lir) { int opcode = lir->opcode; - if (opcode < 0) { + if (IsPseudoLirOp(opcode)) { if (UNLIKELY(opcode == kPseudoPseudoAlign4)) { // Note: size for this opcode will be either 0 or 2 depending on final alignment. 
lir->u.a.bytes[0] = (PADDING_MOV_R5_R5 & 0xff); @@ -1594,6 +1594,7 @@ void ArmMir2Lir::AssembleLIR() { } int ArmMir2Lir::GetInsnSize(LIR* lir) { + DCHECK(!IsPseudoLirOp(lir->opcode)); return EncodingMap[lir->opcode].size; } @@ -1613,7 +1614,7 @@ uint32_t ArmMir2Lir::EncodeRange(LIR* head_lir, LIR* tail_lir, uint32_t offset) lir->offset = offset; if (!lir->flags.is_nop) { if (lir->flags.fixup != kFixupNone) { - if (lir->opcode >= 0) { + if (!IsPseudoLirOp(lir->opcode)) { lir->flags.size = EncodingMap[lir->opcode].size; lir->flags.fixup = EncodingMap[lir->opcode].fixup; } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) { diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index a4ea10b..933c1a3 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -718,14 +718,17 @@ int ArmMir2Lir::LoadHelper(ThreadOffset offset) { } uint64_t ArmMir2Lir::GetTargetInstFlags(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return ArmMir2Lir::EncodingMap[opcode].flags; } const char* ArmMir2Lir::GetTargetInstName(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return ArmMir2Lir::EncodingMap[opcode].name; } const char* ArmMir2Lir::GetTargetInstFmt(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return ArmMir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index a7b8dfe..00de8de 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -327,7 +327,7 @@ LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2, LOG(FATAL) << "Bad opcode: " << op; break; } - DCHECK_GE(static_cast<int>(opcode), 0); + DCHECK(!IsPseudoLirOp(opcode)); if (EncodingMap[opcode].flags & IS_BINARY_OP) { return NewLIR2(opcode, r_dest_src1, r_src2); } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) { @@ -405,7 +405,7 @@ LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1, LOG(FATAL) << "Bad 
opcode: " << op; break; } - DCHECK_GE(static_cast<int>(opcode), 0); + DCHECK(!IsPseudoLirOp(opcode)); if (EncodingMap[opcode].flags & IS_QUAD_OP) { return NewLIR4(opcode, r_dest, r_src1, r_src2, shift); } else { diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc index f915779..0f29578 100644 --- a/compiler/dex/quick/local_optimizations.cc +++ b/compiler/dex/quick/local_optimizations.cc @@ -78,7 +78,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { } for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) { - if (is_pseudo_opcode(this_lir->opcode)) { + if (IsPseudoLirOp(this_lir->opcode)) { continue; } @@ -135,7 +135,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) { * Skip already dead instructions (whose dataflow information is * outdated and misleading). */ - if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) { + if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) { continue; } @@ -285,7 +285,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { /* Start from the second instruction */ for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) { - if (is_pseudo_opcode(this_lir->opcode)) { + if (IsPseudoLirOp(this_lir->opcode)) { continue; } @@ -362,7 +362,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { * Store the dependent or non-pseudo/indepedent instruction to the * list. 
*/ - if (stop_here || !is_pseudo_opcode(check_lir->opcode)) { + if (stop_here || !IsPseudoLirOp(check_lir->opcode)) { prev_inst_list[next_slot++] = check_lir; if (next_slot == MAX_HOIST_DISTANCE) { break; @@ -393,7 +393,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { int slot; LIR* dep_lir = prev_inst_list[next_slot-1]; /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */ - if (!is_pseudo_opcode(dep_lir->opcode) && + if (!IsPseudoLirOp(dep_lir->opcode) && (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) { first_slot -= LDLD_DISTANCE; } @@ -434,7 +434,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) { * Try to find two instructions with load/use dependency until * the remaining instructions are less than LD_LATENCY. */ - bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false : + bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false : (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD); if (((cur_lir->u.m.use_mask & prev_lir->u.m.def_mask) && prev_is_load) || (slot < LD_LATENCY)) { break; diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc index 3a6207c..6bfccfd 100644 --- a/compiler/dex/quick/mips/assemble_mips.cc +++ b/compiler/dex/quick/mips/assemble_mips.cc @@ -646,6 +646,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) { if (res != kSuccess) { continue; } + DCHECK(!IsPseudoLirOp(lir->opcode)); const MipsEncodingMap *encoder = &EncodingMap[lir->opcode]; uint32_t bits = encoder->skeleton; int i; @@ -695,6 +696,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) { code_buffer_.push_back((bits >> 24) & 0xff); // TUNING: replace with proper delay slot handling if (encoder->size == 8) { + DCHECK(!IsPseudoLirOp(lir->opcode)); const MipsEncodingMap *encoder = &EncodingMap[kMipsNop]; uint32_t bits = encoder->skeleton; code_buffer_.push_back(bits & 0xff); @@ -707,6 +709,7 @@ AssemblerStatus 
MipsMir2Lir::AssembleInstructions(uintptr_t start_addr) { } int MipsMir2Lir::GetInsnSize(LIR* lir) { + DCHECK(!IsPseudoLirOp(lir->opcode)); return EncodingMap[lir->opcode].size; } diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index f9d432e..0ee32d4 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -553,14 +553,17 @@ Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, } uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return MipsMir2Lir::EncodingMap[opcode].flags; } const char* MipsMir2Lir::GetTargetInstName(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return MipsMir2Lir::EncodingMap[opcode].name; } const char* MipsMir2Lir::GetTargetInstFmt(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return MipsMir2Lir::EncodingMap[opcode].fmt; } diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h index 314c57e..f293700 100644 --- a/compiler/dex/quick/mir_to_lir-inl.h +++ b/compiler/dex/quick/mir_to_lir-inl.h @@ -69,7 +69,7 @@ inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0, * operands. 
*/ inline LIR* Mir2Lir::NewLIR0(int opcode) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & NO_OPERAND)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -79,7 +79,7 @@ inline LIR* Mir2Lir::NewLIR0(int opcode) { } inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_UNARY_OP)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -89,7 +89,7 @@ inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) { } inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_BINARY_OP)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -99,7 +99,7 @@ inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1) { } inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_TERTIARY_OP)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -109,7 +109,7 @@ inline LIR* Mir2Lir::NewLIR3(int opcode, int dest, int src1, int src2) { } inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_QUAD_OP)) << 
GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -120,7 +120,7 @@ inline LIR* Mir2Lir::NewLIR4(int opcode, int dest, int src1, int src2, int info) inline LIR* Mir2Lir::NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2) { - DCHECK(is_pseudo_opcode(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP)) + DCHECK(IsPseudoLirOp(opcode) || (GetTargetInstFlags(opcode) & IS_QUIN_OP)) << GetTargetInstName(opcode) << " " << opcode << " " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " " << current_dalvik_offset_; @@ -142,8 +142,10 @@ inline void Mir2Lir::SetupRegMask(uint64_t* mask, int reg) { inline void Mir2Lir::SetupResourceMasks(LIR* lir) { int opcode = lir->opcode; - if ((opcode < 0) && (opcode != kPseudoBarrier)) { - lir->flags.fixup = kFixupLabel; + if (IsPseudoLirOp(opcode)) { + if (opcode != kPseudoBarrier) { + lir->flags.fixup = kFixupLabel; + } return; } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index 21711e5..5df2672 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -181,7 +181,6 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, #define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath)) #define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath)) #define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath)) -#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0) class Mir2Lir : public Backend { public: @@ -257,6 +256,10 @@ class Mir2Lir : public Backend { return code_buffer_.size() / sizeof(code_buffer_[0]); } + bool IsPseudoLirOp(int opcode) { + return (opcode < 0); + } + // Shared by all targets - implemented in codegen_util.cc void AppendLIR(LIR* lir); void InsertLIRBefore(LIR* current_lir, LIR* new_lir); diff --git a/compiler/dex/quick/x86/assemble_x86.cc 
b/compiler/dex/quick/x86/assemble_x86.cc index b1634da..064ff31 100644 --- a/compiler/dex/quick/x86/assemble_x86.cc +++ b/compiler/dex/quick/x86/assemble_x86.cc @@ -362,6 +362,7 @@ static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacemen } int X86Mir2Lir::GetInsnSize(LIR* lir) { + DCHECK(!IsPseudoLirOp(lir->opcode)); const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode]; switch (entry->kind) { case kData: @@ -1166,7 +1167,7 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) { const bool kVerbosePcFixup = false; for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { - if (lir->opcode < 0) { + if (IsPseudoLirOp(lir->opcode)) { continue; } @@ -1393,7 +1394,7 @@ int X86Mir2Lir::AssignInsnOffsets() { for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { lir->offset = offset; - if (LIKELY(lir->opcode >= 0)) { + if (LIKELY(!IsPseudoLirOp(lir->opcode))) { if (!lir->flags.is_nop) { offset += lir->flags.size; } diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index f080830..0f005da 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -524,14 +524,17 @@ int X86Mir2Lir::LoadHelper(ThreadOffset offset) { } uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return X86Mir2Lir::EncodingMap[opcode].flags; } const char* X86Mir2Lir::GetTargetInstName(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return X86Mir2Lir::EncodingMap[opcode].name; } const char* X86Mir2Lir::GetTargetInstFmt(int opcode) { + DCHECK(!IsPseudoLirOp(opcode)); return X86Mir2Lir::EncodingMap[opcode].fmt; } |