diff options
author | Calin Juravle <calin@google.com> | 2015-02-16 13:13:29 +0000 |
---|---|---|
committer | Calin Juravle <calin@google.com> | 2015-02-20 14:26:08 +0000 |
commit | b1498f67b444c897fa8f1530777ef118e05aa631 (patch) | |
tree | 7ff4709329b0ba752a6111103a76fcea896a3adb /compiler/optimizing | |
parent | acf735c13998ad2a175f5a17e7bfce220073279d (diff) | |
download | art-b1498f67b444c897fa8f1530777ef118e05aa631.zip art-b1498f67b444c897fa8f1530777ef118e05aa631.tar.gz art-b1498f67b444c897fa8f1530777ef118e05aa631.tar.bz2 |
Improve type propagation with if-contexts
This works by adding a new instruction (HBoundType) after each
`if (a instanceof ClassA) {}` to bound the type that `a` can take in the
True-dominated blocks.
Change-Id: Iae6a150b353486d4509b0d9b092164675732b90c
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 12 | ||||
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 12 | ||||
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 12 | ||||
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 12 | ||||
-rw-r--r-- | compiler/optimizing/nodes.h | 77 | ||||
-rw-r--r-- | compiler/optimizing/prepare_for_register_allocation.cc | 5 | ||||
-rw-r--r-- | compiler/optimizing/prepare_for_register_allocation.h | 1 | ||||
-rw-r--r-- | compiler/optimizing/reference_type_propagation.cc | 283 | ||||
-rw-r--r-- | compiler/optimizing/reference_type_propagation.h | 22 |
9 files changed, 300 insertions(+), 136 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 7b0231b..e864ae1 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -3865,5 +3865,17 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, DCHECK(!IsLeafMethod()); } +void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + } // namespace arm } // namespace art diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 8220207..6da1e61 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -2582,6 +2582,18 @@ void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) { HandleBinaryOp(instruction); } +void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. 
+ UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + #undef __ #undef QUICK_ENTRY_POINT diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 8a73eb4..e151c6b 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -3914,5 +3914,17 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr } } +void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + } // namespace x86 } // namespace art diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index f7ec67f..41a19e1 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -3711,5 +3711,17 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in } } +void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. + UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + +void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) { + // Nothing to do, this should be removed during prepare for register allocator. 
+ UNUSED(instruction); + LOG(FATAL) << "Unreachable"; +} + } // namespace x86_64 } // namespace art diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index c7b8343..4baea66 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -583,6 +583,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { M(ArrayLength, Instruction) \ M(ArraySet, Instruction) \ M(BoundsCheck, Instruction) \ + M(BoundType, Instruction) \ M(CheckCast, Instruction) \ M(ClinitCheck, Instruction) \ M(Compare, BinaryOperation) \ @@ -876,39 +877,30 @@ class HEnvironment : public ArenaObject<kArenaAllocMisc> { class ReferenceTypeInfo : ValueObject { public: - ReferenceTypeInfo() : is_exact_(false), is_top_(true) {} - ReferenceTypeInfo(Handle<mirror::Class> type_handle, bool is_exact) { - SetTypeHandle(type_handle, is_exact); - } - - bool IsExact() const { return is_exact_; } - bool IsTop() const { return is_top_; } - - Handle<mirror::Class> GetTypeHandle() const { return type_handle_; } - - void SetTop() { - is_top_ = true; - type_handle_ = Handle<mirror::Class>(); - } + typedef Handle<mirror::Class> TypeHandle; - void SetInexact() { is_exact_ = false; } - - void SetTypeHandle(Handle<mirror::Class> type_handle, bool is_exact) + static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - is_exact_ = is_exact; if (type_handle->IsObjectClass()) { - is_top_ = true; // Override the type handle to be consistent with the case when we get to // Top but don't have the Object class available. It avoids having to guess // what value the type_handle has when it's Top. 
- type_handle_ = Handle<mirror::Class>(); + return ReferenceTypeInfo(TypeHandle(), is_exact, true); } else { - is_top_ = false; - type_handle_ = type_handle; + return ReferenceTypeInfo(type_handle, is_exact, false); } } - bool IsSupertypeOf(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static ReferenceTypeInfo CreateTop(bool is_exact) { + return ReferenceTypeInfo(TypeHandle(), is_exact, true); + } + + bool IsExact() const { return is_exact_; } + bool IsTop() const { return is_top_; } + + Handle<mirror::Class> GetTypeHandle() const { return type_handle_; } + + bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (IsTop()) { // Top (equivalent for java.lang.Object) is supertype of anything. return true; @@ -943,9 +935,14 @@ class ReferenceTypeInfo : ValueObject { } private: + ReferenceTypeInfo() : ReferenceTypeInfo(TypeHandle(), false, true) {} + ReferenceTypeInfo(TypeHandle type_handle, bool is_exact, bool is_top) + : type_handle_(type_handle), is_exact_(is_exact), is_top_(is_top) {} + // The class of the object. - Handle<mirror::Class> type_handle_; + TypeHandle type_handle_; // Whether or not the type is exact or a superclass of the actual type. + // Whether or not we have any information about this type. bool is_exact_; // A true value here means that the object type should be java.lang.Object. 
// We don't have access to the corresponding mirror object every time so this @@ -968,7 +965,8 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { locations_(nullptr), live_interval_(nullptr), lifetime_position_(kNoLifetime), - side_effects_(side_effects) {} + side_effects_(side_effects), + reference_type_info_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {} virtual ~HInstruction() {} @@ -2740,7 +2738,8 @@ class HLoadClass : public HExpression<0> { type_index_(type_index), is_referrers_class_(is_referrers_class), dex_pc_(dex_pc), - generate_clinit_check_(false) {} + generate_clinit_check_(false), + loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {} bool CanBeMoved() const OVERRIDE { return true; } @@ -3006,6 +3005,32 @@ class HInstanceOf : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HInstanceOf); }; +class HBoundType : public HExpression<1> { + public: + HBoundType(HInstruction* input, ReferenceTypeInfo bound_type) + : HExpression(Primitive::kPrimNot, SideEffects::None()), + bound_type_(bound_type) { + SetRawInputAt(0, input); + } + + const ReferenceTypeInfo& GetBoundType() const { return bound_type_; } + + bool CanBeNull() const OVERRIDE { + // `null instanceof ClassX` always return false so we can't be null. + return false; + } + + DECLARE_INSTRUCTION(BoundType); + + private: + // Encodes the most upper class that this instruction can have. In other words + // it is always the case that GetBoundType().IsSupertypeOf(GetReferenceType()). 
+ // It is used to bound the type in cases like `if (x instanceof ClassX) {}` + const ReferenceTypeInfo bound_type_; + + DISALLOW_COPY_AND_ASSIGN(HBoundType); +}; + class HCheckCast : public HTemplateInstruction<2> { public: HCheckCast(HInstruction* object, diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index 12acd08..2d9a2bf 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -42,6 +42,11 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) { check->ReplaceWith(check->InputAt(0)); } +void PrepareForRegisterAllocation::VisitBoundType(HBoundType* bound_type) { + bound_type->ReplaceWith(bound_type->InputAt(0)); + bound_type->GetBlock()->RemoveInstruction(bound_type); +} + void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { HLoadClass* cls = check->GetLoadClass(); check->ReplaceWith(cls); diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index 0fdb65f..0f697fb 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -36,6 +36,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { virtual void VisitNullCheck(HNullCheck* check) OVERRIDE; virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE; virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE; + virtual void VisitBoundType(HBoundType* bound_type) OVERRIDE; virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE; virtual void VisitCondition(HCondition* condition) OVERRIDE; diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 18c0564..76b8d7e 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -23,24 +23,7 
@@ namespace art { -// TODO: Only do the analysis on reference types. We currently have to handle -// the `null` constant, that is represented as a `HIntConstant` and therefore -// has the Primitive::kPrimInt type. - -// TODO: handle: -// public Main ifNullTest(int count, Main a) { -// Main m = new Main(); -// if (a == null) { -// a = m; -// } -// return a.g(); -// } -// public Main ifNotNullTest(Main a) { -// if (a != null) { -// return a.g(); -// } -// return new Main(); -// } +// TODO: handle: a !=/== null. void ReferenceTypePropagation::Run() { // To properly propagate type info we need to visit in the dominator-based order. @@ -52,65 +35,80 @@ void ReferenceTypePropagation::Run() { ProcessWorklist(); } -// Re-computes and updates the nullability of the instruction. Returns whether or -// not the nullability was changed. -bool ReferenceTypePropagation::UpdateNullability(HPhi* phi) { - bool existing_can_be_null = phi->CanBeNull(); - bool new_can_be_null = false; - for (size_t i = 0; i < phi->InputCount(); i++) { - new_can_be_null |= phi->InputAt(i)->CanBeNull(); - } - phi->SetCanBeNull(new_can_be_null); +void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { + // TODO: handle other instructions that give type info + // (NewArray/Call/Field accesses/array accesses) - return existing_can_be_null != new_can_be_null; -} + // Initialize exact types first for faster convergence. + for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { + HInstruction* instr = it.Current(); + if (instr->IsNewInstance()) { + VisitNewInstance(instr->AsNewInstance()); + } else if (instr->IsLoadClass()) { + VisitLoadClass(instr->AsLoadClass()); + } + } -bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HPhi* phi) { - ScopedObjectAccess soa(Thread::Current()); + // Handle Phis. 
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + VisitPhi(it.Current()->AsPhi()); + } - ReferenceTypeInfo existing_rti = phi->GetReferenceTypeInfo(); - ReferenceTypeInfo new_rti = phi->InputAt(0)->GetReferenceTypeInfo(); + // Add extra nodes to bound types. + BoundTypeForIfInstanceOf(block); +} - if (new_rti.IsTop() && !new_rti.IsExact()) { - // Early return if we are Top and inexact. - phi->SetReferenceTypeInfo(new_rti); - return !new_rti.IsEqual(existing_rti);; +// Detects if `block` is the True block for the pattern +// `if (x instanceof ClassX) { }` +// If that's the case insert an HBoundType instruction to bound the type of `x` +// to `ClassX` in the scope of the dominated blocks. +void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { + HInstruction* lastInstruction = block->GetLastInstruction(); + if (!lastInstruction->IsIf()) { + return; + } + HInstruction* ifInput = lastInstruction->InputAt(0); + // TODO: Handle more patterns here: HIf(bool) HIf(HNotEqual). + if (!ifInput->IsEqual()) { + return; + } + HInstruction* instanceOf = ifInput->InputAt(0); + HInstruction* comp_value = ifInput->InputAt(1); + if (!instanceOf->IsInstanceOf() || !comp_value->IsIntConstant()) { + return; } - for (size_t i = 1; i < phi->InputCount(); i++) { - ReferenceTypeInfo input_rti = phi->InputAt(i)->GetReferenceTypeInfo(); + HInstruction* obj = instanceOf->InputAt(0); + HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass(); - if (!input_rti.IsExact()) { - new_rti.SetInexact(); - } + ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + HBoundType* bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti); - if (input_rti.IsTop()) { - new_rti.SetTop(); + // Narrow the type as much as possible. 
+ { + ScopedObjectAccess soa(Thread::Current()); + if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) { + bound_type->SetReferenceTypeInfo(obj_rti); + } else { + bound_type->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); } + } - if (new_rti.IsTop()) { - if (!new_rti.IsExact()) { - break; - } else { - continue; - } - } + block->InsertInstructionBefore(bound_type, lastInstruction); + // Pick the right successor based on the value we compare against. + HIntConstant* comp_value_int = comp_value->AsIntConstant(); + HBasicBlock* instanceOfTrueBlock = comp_value_int->GetValue() == 0 + ? lastInstruction->AsIf()->IfFalseSuccessor() + : lastInstruction->AsIf()->IfTrueSuccessor(); - if (new_rti.GetTypeHandle().Get() == input_rti.GetTypeHandle().Get()) { - // nothing to do if we have the same type - } else if (input_rti.IsSupertypeOf(new_rti)) { - new_rti.SetTypeHandle(input_rti.GetTypeHandle(), false); - } else if (new_rti.IsSupertypeOf(input_rti)) { - new_rti.SetInexact(); - } else { - // TODO: Find common parent. 
- new_rti.SetTop(); - new_rti.SetInexact(); + for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) { + HInstruction* user = it.Current()->GetUser(); + if (instanceOfTrueBlock->Dominates(user->GetBlock())) { + user->ReplaceInput(bound_type, it.Current()->GetIndex()); } } - - phi->SetReferenceTypeInfo(new_rti); - return !new_rti.IsEqual(existing_rti); } void ReferenceTypePropagation::VisitNewInstance(HNewInstance* instr) { @@ -120,7 +118,7 @@ void ReferenceTypePropagation::VisitNewInstance(HNewInstance* instr) { mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex()); if (resolved_class != nullptr) { MutableHandle<mirror::Class> handle = handles_->NewHandle(resolved_class); - instr->SetReferenceTypeInfo(ReferenceTypeInfo(handle, true)); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, true)); } } @@ -131,71 +129,146 @@ void ReferenceTypePropagation::VisitLoadClass(HLoadClass* instr) { mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex()); if (resolved_class != nullptr) { Handle<mirror::Class> handle = handles_->NewHandle(resolved_class); - instr->SetLoadedClassRTI(ReferenceTypeInfo(handle, true)); + instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true)); } Handle<mirror::Class> class_handle = handles_->NewHandle(mirror::Class::GetJavaLangClass()); - instr->SetReferenceTypeInfo(ReferenceTypeInfo(class_handle, true)); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_handle, /* is_exact */ true)); } -void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { - // TODO: handle other instructions that give type info - // (NewArray/Call/Field accesses/array accesses) - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->IsNewInstance()) { - VisitNewInstance(instr->AsNewInstance()); - } else if (instr->IsLoadClass()) { - 
VisitLoadClass(instr->AsLoadClass()); - } +void ReferenceTypePropagation::VisitPhi(HPhi* phi) { + if (phi->GetType() != Primitive::kPrimNot) { + return; } - if (block->IsLoopHeader()) { - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - // Set the initial type for the phi. Use the non back edge input for reaching - // a fixed point faster. - HPhi* phi = it.Current()->AsPhi(); - if (phi->GetType() == Primitive::kPrimNot) { - AddToWorklist(phi); - phi->SetCanBeNull(phi->InputAt(0)->CanBeNull()); - phi->SetReferenceTypeInfo(phi->InputAt(0)->GetReferenceTypeInfo()); - } + + if (phi->GetBlock()->IsLoopHeader()) { + // Set the initial type for the phi. Use the non back edge input for reaching + // a fixed point faster. + AddToWorklist(phi); + phi->SetCanBeNull(phi->InputAt(0)->CanBeNull()); + phi->SetReferenceTypeInfo(phi->InputAt(0)->GetReferenceTypeInfo()); + } else { + // Eagerly compute the type of the phi, for quicker convergence. Note + // that we don't need to add users to the worklist because we are + // doing a reverse post-order visit, therefore either the phi users are + // non-loop phi and will be visited later in the visit, or are loop-phis, + // and they are already in the work list. + UpdateNullability(phi); + UpdateReferenceTypeInfo(phi); + } +} + +ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& a, + const ReferenceTypeInfo& b) { + bool is_exact = a.IsExact() && b.IsExact(); + bool is_top = a.IsTop() || b.IsTop(); + Handle<mirror::Class> type_handle; + + if (!is_top) { + if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) { + type_handle = a.GetTypeHandle(); + } else if (a.IsSupertypeOf(b)) { + type_handle = a.GetTypeHandle(); + is_exact = false; + } else if (b.IsSupertypeOf(a)) { + type_handle = b.GetTypeHandle(); + is_exact = false; + } else { + // TODO: Find a common super class. + is_top = true; + is_exact = false; } + } + + return is_top + ? 
ReferenceTypeInfo::CreateTop(is_exact) + : ReferenceTypeInfo::Create(type_handle, is_exact); +} + +bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) { + ScopedObjectAccess soa(Thread::Current()); + + ReferenceTypeInfo previous_rti = instr->GetReferenceTypeInfo(); + if (instr->IsBoundType()) { + UpdateBoundType(instr->AsBoundType()); + } else if (instr->IsPhi()) { + UpdatePhi(instr->AsPhi()); } else { - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - // Eagerly compute the type of the phi, for quicker convergence. Note - // that we don't need to add users to the worklist because we are - // doing a reverse post-order visit, therefore either the phi users are - // non-loop phi and will be visited later in the visit, or are loop-phis, - // and they are already in the work list. - HPhi* phi = it.Current()->AsPhi(); - if (phi->GetType() == Primitive::kPrimNot) { - UpdateNullability(phi); - UpdateReferenceTypeInfo(phi); + LOG(FATAL) << "Invalid instruction (should not get here)"; + } + + return !previous_rti.IsEqual(instr->GetReferenceTypeInfo()); +} + +void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) { + ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo(); + // Be sure that we don't go over the bounded type. + ReferenceTypeInfo bound_rti = instr->GetBoundType(); + if (!bound_rti.IsSupertypeOf(new_rti)) { + new_rti = bound_rti; + } + instr->SetReferenceTypeInfo(new_rti); +} + +void ReferenceTypePropagation::UpdatePhi(HPhi* instr) { + ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo(); + if (new_rti.IsTop() && !new_rti.IsExact()) { + // Early return if we are Top and inexact. 
+ instr->SetReferenceTypeInfo(new_rti); + return; + } + for (size_t i = 1; i < instr->InputCount(); i++) { + new_rti = MergeTypes(new_rti, instr->InputAt(i)->GetReferenceTypeInfo()); + if (new_rti.IsTop()) { + if (!new_rti.IsExact()) { + break; + } else { + continue; } } } + instr->SetReferenceTypeInfo(new_rti); +} + +// Re-computes and updates the nullability of the instruction. Returns whether or +// not the nullability was changed. +bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) { + DCHECK(instr->IsPhi() || instr->IsBoundType()); + + if (!instr->IsPhi()) { + return false; + } + + HPhi* phi = instr->AsPhi(); + bool existing_can_be_null = phi->CanBeNull(); + bool new_can_be_null = false; + for (size_t i = 0; i < phi->InputCount(); i++) { + new_can_be_null |= phi->InputAt(i)->CanBeNull(); + } + phi->SetCanBeNull(new_can_be_null); + + return existing_can_be_null != new_can_be_null; } void ReferenceTypePropagation::ProcessWorklist() { while (!worklist_.IsEmpty()) { - HPhi* instruction = worklist_.Pop(); + HInstruction* instruction = worklist_.Pop(); if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) { AddDependentInstructionsToWorklist(instruction); } } } -void ReferenceTypePropagation::AddToWorklist(HPhi* instruction) { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot); +void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) { + DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) << instruction->GetType(); worklist_.Add(instruction); } -void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HPhi* instruction) { +void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) { for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) { - HPhi* phi = it.Current()->GetUser()->AsPhi(); - if (phi != nullptr) { - AddToWorklist(phi); + HInstruction* user = it.Current()->GetUser(); + if (user->IsPhi() || user->IsBoundType()) { + 
AddToWorklist(user); } } } - } // namespace art diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index 1ec5df3..e346dbf 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -21,6 +21,7 @@ #include "handle_scope-inl.h" #include "nodes.h" #include "optimization.h" +#include "optimizing_compiler_stats.h" namespace art { @@ -44,18 +45,29 @@ class ReferenceTypePropagation : public HOptimization { private: void VisitNewInstance(HNewInstance* new_instance); void VisitLoadClass(HLoadClass* load_class); + void VisitPhi(HPhi* phi); void VisitBasicBlock(HBasicBlock* block); + + void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void BoundTypeForIfInstanceOf(HBasicBlock* block); + void ProcessWorklist(); - void AddToWorklist(HPhi* phi); - void AddDependentInstructionsToWorklist(HPhi* phi); - bool UpdateNullability(HPhi* phi); - bool UpdateReferenceTypeInfo(HPhi* phi); + void AddToWorklist(HInstruction* instr); + void AddDependentInstructionsToWorklist(HInstruction* instr); + + bool UpdateNullability(HInstruction* instr); + bool UpdateReferenceTypeInfo(HInstruction* instr); + + ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const DexFile& dex_file_; const DexCompilationUnit& dex_compilation_unit_; StackHandleScopeCollection* handles_; - GrowableArray<HPhi*> worklist_; + GrowableArray<HInstruction*> worklist_; static constexpr size_t kDefaultWorklistSize = 8; |