path: root/compiler
author     Vladimir Marko <vmarko@google.com>  2014-07-29 12:04:10 +0100
committer  Vladimir Marko <vmarko@google.com>  2014-07-31 09:57:09 +0100
commit     b19955d3c8fbd9588f7e17299e559d02938154b6 (patch)
tree       10113a67776d1bb050115043e47e6970a85103c5 /compiler
parent     36b111c7d3d635e262114dabde4c26952c7dcbe6 (diff)
Reduce time and memory usage of GVN.
Filter out dead sregs in GVN. Reclaim memory after each LVN
in the GVN modification phase.

Bug: 16398693
Change-Id: I8c88c3009663754e1b66c0ef3f62c3b93276e385
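Editor's note: the two ideas in the commit message can be seen directly in the diffs below. Sreg-to-value map entries for dead registers are dropped when LVNs are copied or merged, and the GVN modification phase gives each basic block its own short-lived ScopedArenaAllocator (see mir_optimization.cc). The following is a minimal, self-contained analogue of the second idea; ScopedArena, BasicBlock, LocalValueNumbering and ApplyGvnModificationPhase here are simplified stand-ins, not ART's real classes or method names.

#include <cstddef>
#include <memory>
#include <vector>

// Stand-in for ART's ScopedArenaAllocator: everything allocated through it
// is released when the object goes out of scope.
class ScopedArena {
 public:
  void* Alloc(size_t size) {
    blocks_.emplace_back(new char[size]);
    return blocks_.back().get();
  }
 private:
  std::vector<std::unique_ptr<char[]>> blocks_;
};

struct BasicBlock { int id; };

// Stand-in for LocalValueNumbering: all of its per-block maps would draw
// from the allocator it is given.
struct LocalValueNumbering {
  explicit LocalValueNumbering(ScopedArena* allocator) : allocator_(allocator) {}
  void GetValueNumbers(const BasicBlock& /*bb*/) {}
  ScopedArena* allocator_;
};

void ApplyGvnModificationPhase(const std::vector<BasicBlock>& blocks) {
  for (const BasicBlock& bb : blocks) {
    ScopedArena allocator;                 // fresh, short-lived arena per block
    LocalValueNumbering lvn(&allocator);
    lvn.GetValueNumbers(bb);
  }                                        // arena destroyed: the block's LVN memory is reclaimed
}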
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/frontend.cc                      2
-rw-r--r--  compiler/dex/global_value_numbering.cc       26
-rw-r--r--  compiler/dex/global_value_numbering.h         5
-rw-r--r--  compiler/dex/global_value_numbering_test.cc  15
-rw-r--r--  compiler/dex/local_value_numbering.cc       108
-rw-r--r--  compiler/dex/local_value_numbering.h         28
-rw-r--r--  compiler/dex/local_value_numbering_test.cc    2
-rw-r--r--  compiler/dex/mir_optimization.cc              6
-rw-r--r--  compiler/utils/scoped_arena_allocator.h       4
9 files changed, 126 insertions, 70 deletions
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 51446f6..11dd182 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -747,7 +747,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
/* Free Arenas from the cu.arena_stack for reuse by the cu.arena in the codegen. */
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
- if (cu.arena_stack.PeakBytesAllocated() > 256 * 1024) {
+ if (cu.arena_stack.PeakBytesAllocated() > 1 * 1024 * 1024) {
MemStats stack_stats(cu.arena_stack.GetPeakStats());
LOG(INFO) << method_name << " " << Dumpable<MemStats>(stack_stats);
}
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index d86be4e..d7ef6f0 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -43,7 +43,8 @@ GlobalValueNumbering::~GlobalValueNumbering() {
STLDeleteElements(&lvns_);
}
-LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb) {
+LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
+ ScopedArenaAllocator* allocator) {
if (UNLIKELY(!Good())) {
return nullptr;
}
@@ -58,13 +59,17 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb) {
last_value_ = kNoValue; // Make bad.
return nullptr;
}
+ if (allocator == nullptr) {
+ allocator = allocator_;
+ }
DCHECK(work_lvn_.get() == nullptr);
- work_lvn_.reset(new (allocator_) LocalValueNumbering(this, bb->id));
+ work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
if (bb->block_type == kEntryBlock) {
if ((cu_->access_flags & kAccStatic) == 0) {
// If non-static method, mark "this" as non-null
int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
- work_lvn_->SetSRegNullChecked(this_reg);
+ uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
+ work_lvn_->SetValueNameNullChecked(value_name);
}
} else {
// To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
@@ -120,7 +125,9 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb) {
work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
BasicBlock* pred_bb = mir_graph_->GetBasicBlock(merge_lvns_[0]->Id());
if (HasNullCheckLastInsn(pred_bb, bb->id)) {
- work_lvn_->SetSRegNullChecked(pred_bb->last_mir_insn->ssa_rep->uses[0]);
+ int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
+ uint16_t value_name = merge_lvns_[0]->GetSRegValueName(s_reg);
+ work_lvn_->SetValueNameNullChecked(value_name);
}
} else {
work_lvn_->Merge(merge_type);
@@ -135,9 +142,14 @@ bool GlobalValueNumbering::FinishBasicBlock(BasicBlock* bb) {
++bbs_processed_;
merge_lvns_.clear();
- std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
- lvns_[bb->id] = work_lvn_.release();
- return (old_lvn == nullptr) || !old_lvn->Equals(*lvns_[bb->id]);
+ bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
+ if (change) {
+ std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
+ lvns_[bb->id] = work_lvn_.release();
+ } else {
+ work_lvn_.reset();
+ }
+ return change;
}
uint16_t GlobalValueNumbering::GetFieldId(const MirFieldInfo& field_info, uint16_t type) {
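Editor's note: a condensed, self-contained rendering of the new FinishBasicBlock control flow above may help: the freshly computed work LVN replaces the stored one only when it actually differs, and is simply dropped otherwise. Lvn is a placeholder type, not ART's LocalValueNumbering.

#include <memory>
#include <vector>

struct Lvn {
  bool Equals(const Lvn& other) const { return state == other.state; }
  int state = 0;
};

// Returns whether the block's value numbering changed; only then is the
// stored LVN replaced, otherwise the work copy is discarded.
bool FinishBasicBlock(std::vector<std::unique_ptr<const Lvn>>& lvns,
                      std::unique_ptr<Lvn>& work_lvn, size_t bb_id) {
  bool change = (lvns[bb_id] == nullptr) || !lvns[bb_id]->Equals(*work_lvn);
  if (change) {
    lvns[bb_id] = std::move(work_lvn);  // keep the new per-block state
  } else {
    work_lvn.reset();                   // unchanged: drop the work copy
  }
  return change;
}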
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a12a779..c06ff6f 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -32,7 +32,8 @@ class GlobalValueNumbering {
~GlobalValueNumbering();
// Prepare LVN for the basic block.
- LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb);
+ LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
+ ScopedArenaAllocator* allocator = nullptr);
// Finish processing the basic block.
bool FinishBasicBlock(BasicBlock* bb);
@@ -55,7 +56,7 @@ class GlobalValueNumbering {
// GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMIR);
+ return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
}
// Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index c82d231..e8501cd 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -212,6 +212,7 @@ class GlobalValueNumberingTest : public testing::Test {
if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
}
}
cu_.mir_graph->num_blocks_ = count;
@@ -333,12 +334,23 @@ class GlobalValueNumberingTest : public testing::Test {
ssa_reps_(),
allocator_(),
gvn_(),
- value_names_() {
+ value_names_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->ssa_base_vregs_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_ = new (&cu_.arena) GrowableArray<int>(&cu_.arena, kMaxSsaRegs);
+ for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+ cu_.mir_graph->ssa_base_vregs_->Insert(i);
+ cu_.mir_graph->ssa_subscripts_->Insert(0);
+ }
}
+ static constexpr size_t kMaxSsaRegs = 16384u;
+
ArenaPool pool_;
CompilationUnit cu_;
size_t mir_count_;
@@ -347,6 +359,7 @@ class GlobalValueNumberingTest : public testing::Test {
std::unique_ptr<ScopedArenaAllocator> allocator_;
std::unique_ptr<GlobalValueNumbering> gvn_;
std::vector<uint16_t> value_names_;
+ ArenaBitVector* live_in_v_;
};
class GlobalValueNumberingTestDiamond : public GlobalValueNumberingTest {
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 0e072ec..5997568 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -197,11 +197,7 @@ LocalValueNumbering::AliasingValues* LocalValueNumbering::GetAliasingValues(
Map* map, const typename Map::key_type& key) {
auto lb = map->lower_bound(key);
if (lb == map->end() || map->key_comp()(key, lb->first)) {
- map->PutBefore(lb, key, AliasingValues(gvn_->allocator_));
- // The new entry was inserted before lb.
- DCHECK(lb != map->begin());
- --lb;
- DCHECK(!map->key_comp()(lb->first, key) && !map->key_comp()(key, lb->first));
+ lb = map->PutBefore(lb, key, AliasingValues(this));
}
return &lb->second;
}
@@ -308,25 +304,37 @@ bool LocalValueNumbering::HandleAliasingValuesPut(Map* map, const typename Map::
return true;
}
-LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id)
+template <typename K>
+void LocalValueNumbering::CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+ const ScopedArenaSafeMap<K, AliasingValues>& src) {
+ // We need each new AliasingValues (or rather its map members) to be constructed
+ // with our allocator, rather than the allocator of the source.
+ for (const auto& entry : src) {
+ auto it = dest->PutBefore(dest->end(), entry.first, AliasingValues(this));
+ it->second = entry.second; // Map assignments preserve current allocator.
+ }
+}
+
+LocalValueNumbering::LocalValueNumbering(GlobalValueNumbering* gvn, uint16_t id,
+ ScopedArenaAllocator* allocator)
: gvn_(gvn),
id_(id),
- sreg_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- sreg_wide_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- sfield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- non_aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- aliasing_ifield_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- non_aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- aliasing_array_value_map_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
+ sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sfield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ aliasing_ifield_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ aliasing_array_value_map_(std::less<uint16_t>(), allocator->Adapter()),
global_memory_version_(0u),
- non_aliasing_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- escaped_refs_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), gvn->Allocator()->Adapter()),
- escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), gvn->Allocator()->Adapter()),
- range_checked_(RangeCheckKeyComparator() , gvn->Allocator()->Adapter()),
- null_checked_(std::less<uint16_t>(), gvn->Allocator()->Adapter()),
- merge_names_(gvn->Allocator()->Adapter()),
- merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), gvn->Allocator()->Adapter()),
+ non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ escaped_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ escaped_ifield_clobber_set_(EscapedIFieldClobberKeyComparator(), allocator->Adapter()),
+ escaped_array_clobber_set_(EscapedArrayClobberKeyComparator(), allocator->Adapter()),
+ range_checked_(RangeCheckKeyComparator() , allocator->Adapter()),
+ null_checked_(std::less<uint16_t>(), allocator->Adapter()),
+ merge_names_(allocator->Adapter()),
+ merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
merge_new_memory_version_(kNoValue) {
std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
@@ -352,8 +360,8 @@ bool LocalValueNumbering::Equals(const LocalValueNumbering& other) const {
}
void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType merge_type) {
- sreg_value_map_ = other.sreg_value_map_;
- sreg_wide_value_map_ = other.sreg_wide_value_map_;
+ CopyLiveSregValues(&sreg_value_map_, other.sreg_value_map_);
+ CopyLiveSregValues(&sreg_wide_value_map_, other.sreg_wide_value_map_);
if (merge_type == kReturnMerge) {
// RETURN or PHI+RETURN. We need only sreg value maps.
@@ -361,7 +369,7 @@ void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType m
}
non_aliasing_ifield_value_map_ = other.non_aliasing_ifield_value_map_;
- non_aliasing_array_value_map_ = other.non_aliasing_array_value_map_;
+ CopyAliasingValuesMap(&non_aliasing_array_value_map_, other.non_aliasing_array_value_map_);
non_aliasing_refs_ = other.non_aliasing_refs_;
range_checked_ = other.range_checked_;
null_checked_ = other.null_checked_;
@@ -380,8 +388,8 @@ void LocalValueNumbering::MergeOne(const LocalValueNumbering& other, MergeType m
std::copy_n(other.unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
std::copy_n(other.unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
sfield_value_map_ = other.sfield_value_map_;
- aliasing_ifield_value_map_ = other.aliasing_ifield_value_map_;
- aliasing_array_value_map_ = other.aliasing_array_value_map_;
+ CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
+ CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
escaped_refs_ = other.escaped_refs_;
escaped_ifield_clobber_set_ = other.escaped_ifield_clobber_set_;
escaped_array_clobber_set_ = other.escaped_array_clobber_set_;
@@ -493,8 +501,20 @@ void LocalValueNumbering::IntersectSets() {
}
}
-template <typename Map, Map LocalValueNumbering::* map_ptr>
-void LocalValueNumbering::IntersectMaps() {
+void LocalValueNumbering::CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src) {
+ auto dest_end = dest->end();
+ ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+ DCHECK(live_in_v != nullptr);
+ for (const auto& entry : src) {
+ bool live = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+ if (live) {
+ dest->PutBefore(dest_end, entry.first, entry.second);
+ }
+ }
+}
+
+template <LocalValueNumbering::SregValueMap LocalValueNumbering::* map_ptr>
+void LocalValueNumbering::IntersectSregValueMaps() {
DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
// Find the LVN with the least entries in the set.
@@ -506,18 +526,22 @@ void LocalValueNumbering::IntersectMaps() {
}
// For each key check if it's in all the LVNs.
+ ArenaBitVector* live_in_v = gvn_->GetMirGraph()->GetBasicBlock(id_)->data_flow_info->live_in_v;
+ DCHECK(live_in_v != nullptr);
for (const auto& entry : least_entries_lvn->*map_ptr) {
- bool checked = true;
- for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
- if (lvn != least_entries_lvn) {
- auto it = (lvn->*map_ptr).find(entry.first);
- if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
- checked = false;
- break;
+ bool live_and_same = live_in_v->IsBitSet(gvn_->GetMirGraph()->SRegToVReg(entry.first));
+ if (live_and_same) {
+ for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+ if (lvn != least_entries_lvn) {
+ auto it = (lvn->*map_ptr).find(entry.first);
+ if (it == (lvn->*map_ptr).end() || !(it->second == entry.second)) {
+ live_and_same = false;
+ break;
+ }
}
}
}
- if (checked) {
+ if (live_and_same) {
(this->*map_ptr).PutBefore((this->*map_ptr).end(), entry.first, entry.second);
}
}
@@ -721,11 +745,7 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en
typename Map::iterator hint) {
const typename Map::key_type& key = entry.first;
- (this->*map_ptr).PutBefore(hint, key, AliasingValues(gvn_->allocator_));
- DCHECK(hint != (this->*map_ptr).begin());
- AliasingIFieldValuesMap::iterator it = hint;
- --it;
- DCHECK_EQ(it->first, key);
+ auto it = (this->*map_ptr).PutBefore(hint, key, AliasingValues(this));
AliasingValues* my_values = &it->second;
const AliasingValues* cmp_values = nullptr;
@@ -849,8 +869,8 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en
void LocalValueNumbering::Merge(MergeType merge_type) {
DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
- IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_value_map_>();
- IntersectMaps<SregValueMap, &LocalValueNumbering::sreg_wide_value_map_>();
+ IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
+ IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
if (merge_type == kReturnMerge) {
// RETURN or PHI+RETURN. We need only sreg value maps.
return;
@@ -1385,7 +1405,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
if (kLocalValueNumberingEnableFilledNewArrayTracking && mir->ssa_rep->num_uses != 0u) {
AliasingValues* values = GetAliasingValues(&non_aliasing_array_value_map_, array);
// Clear the value if we got a merged version in a loop.
- *values = AliasingValues(gvn_->allocator_);
+ *values = AliasingValues(this);
for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
DCHECK_EQ(High16Bits(i), 0u);
uint16_t index = gvn_->LookupValue(Instruction::CONST, i, 0u, 0);
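Editor's note: the "filter out dead sregs" part of the commit is concentrated in CopyLiveSregValues and IntersectSregValueMaps above: an sreg-to-value entry is carried over from a predecessor only if its virtual register is live into the current block. Below is a self-contained analogue, with std::map and std::vector<bool> standing in for ART's ScopedArenaSafeMap and ArenaBitVector, and an explicit sreg-to-vreg table standing in for MIRGraph::SRegToVReg().

#include <cstdint>
#include <map>
#include <vector>

using SregValueMap = std::map<uint16_t, uint16_t>;

// Copy only entries whose virtual register is live into the current block,
// so value names for dead sregs no longer accumulate in the LVN maps.
void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src,
                        const std::vector<bool>& live_in_v,
                        const std::vector<uint16_t>& sreg_to_vreg) {
  for (const auto& entry : src) {
    if (live_in_v[sreg_to_vreg[entry.first]]) {
      // Keys arrive in order, so insert with a hint at the end.
      dest->emplace_hint(dest->end(), entry.first, entry.second);
    }
  }
}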
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 190eab4..855d66d 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -36,7 +36,7 @@ class LocalValueNumbering {
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
public:
- LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id);
+ LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);
BasicBlockId Id() const {
return id_;
@@ -44,9 +44,11 @@ class LocalValueNumbering {
bool Equals(const LocalValueNumbering& other) const;
- // Set non-static method's "this".
- void SetSRegNullChecked(uint16_t s_reg) {
- uint16_t value_name = GetOperandValue(s_reg);
+ uint16_t GetSRegValueName(uint16_t s_reg) const {
+ return GetOperandValue(s_reg);
+ }
+
+ void SetValueNameNullChecked(uint16_t value_name) {
null_checked_.insert(value_name);
}
@@ -76,7 +78,7 @@ class LocalValueNumbering {
// LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+ return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
}
// Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
@@ -225,12 +227,12 @@ class LocalValueNumbering {
// store or because they contained the last_stored_value before the store and thus could not
// have changed as a result.
struct AliasingValues {
- explicit AliasingValues(ScopedArenaAllocator* allocator)
+ explicit AliasingValues(LocalValueNumbering* lvn)
: memory_version_before_stores(kNoValue),
last_stored_value(kNoValue),
- store_loc_set(std::less<uint16_t>(), allocator->Adapter()),
+ store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
last_load_memory_version(kNoValue),
- load_value_map(std::less<uint16_t>(), allocator->Adapter()) {
+ load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
}
uint16_t memory_version_before_stores; // kNoValue if start version for the field.
@@ -286,6 +288,10 @@ class LocalValueNumbering {
bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
uint16_t location, uint16_t value);
+ template <typename K>
+ void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
+ const ScopedArenaSafeMap<K, AliasingValues>& src);
+
uint16_t MarkNonAliasingNonNull(MIR* mir);
bool IsNonAliasing(uint16_t reg) const;
bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
@@ -314,9 +320,11 @@ class LocalValueNumbering {
template <typename Set, Set LocalValueNumbering::* set_ptr>
void IntersectSets();
+ void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
+
// Intersect maps as sets. The value type must be equality-comparable.
- template <typename Map, Map LocalValueNumbering::* map_ptr>
- void IntersectMaps();
+ template <SregValueMap LocalValueNumbering::* map_ptr>
+ void IntersectSregValueMaps();
// Intersect maps as sets. The value type must be equality-comparable.
template <typename Map>
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index b3eae42..e4e944e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -196,7 +196,7 @@ class LocalValueNumberingTest : public testing::Test {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get()));
- lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u));
+ lvn_.reset(new (allocator_.get()) LocalValueNumbering(gvn_.get(), 0u, allocator_.get()));
gvn_->AllowModifications();
}
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index d355ddc..23ceb56 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -329,7 +329,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
if (use_lvn) {
allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get()));
- local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id));
+ local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
+ allocator.get()));
}
while (bb != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -1170,7 +1171,8 @@ void MIRGraph::ApplyGlobalValueNumberingEnd() {
temp_gvn_->AllowModifications();
PreOrderDfsIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+ ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
+ LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 37799cb..9f33f2d 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -222,11 +222,11 @@ class ScopedArenaAllocatorAdapter : private DebugStackReference, private DebugSt
}
void construct(pointer p, const_reference val) {
- DebugStackIndirectTopRef::CheckTop();
+ // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
new (static_cast<void*>(p)) value_type(val);
}
void destroy(pointer p) {
- DebugStackIndirectTopRef::CheckTop();
+ // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
p->~value_type();
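Editor's note: the last hunk relaxes a debug-only check. construct() and destroy() no longer require the adapter's allocator to be the top of the arena stack, because the per-block allocator introduced above sits on top of the stack while containers owned by the GlobalValueNumbering (allocated from the outer allocator, such as the member vector mentioned in PrepareBasicBlock) still have elements inserted or erased within their existing capacity. No new arena allocation happens in that case, so the old CheckTop() was stricter than necessary. A rough illustration of the shape of that situation, using standard containers rather than ART's adapters:

#include <vector>

int main() {
  std::vector<int> outer_owned;    // imagine: allocated via the GVN's (outer) allocator
  outer_owned.reserve(16);         // capacity obtained while the outer allocator is the top
  {
    std::vector<int> per_block;    // imagine: a per-block allocator now on top of the stack
    per_block.push_back(1);
    outer_owned.push_back(7);      // constructs into existing capacity only; the removed
                                   // CheckTop() in construct() would have fired here in
                                   // a debug build
  }
  return 0;
}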
}