author    Vladimir Marko <vmarko@google.com>    2015-02-13 10:28:29 +0000
committer Vladimir Marko <vmarko@google.com>    2015-02-13 11:29:04 +0000
commit    e4fcc5ba2284c201c022b52d27f7a1201d696324 (patch)
tree      2f490060978baeb85c79d6184fcc08135f506637
parent    8240a8af33aedea9a4fe5c3b394d7c025ad081fb (diff)
Clean up Scoped-/ArenaAllocator array allocations.
Change-Id: Id718f8a4450adf1608306286fa4e6b9194022532
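In short: every call site that used to spell static_cast<T*>(allocator->Alloc(n * sizeof(T), kind)) now writes allocator->AllocArray<T>(n, kind), letting the allocator do the sizeof arithmetic and the cast. Below is a minimal, self-contained sketch of that pattern; ToyArenaAllocator is a hypothetical stand-in (ART's real ArenaAllocator bump-allocates from pooled arenas), though the AllocArray body matches the template this patch adds to arena_allocator.h and scoped_arena_allocator.h.

// Minimal sketch of the pattern this commit introduces. ToyArenaAllocator is
// a hypothetical stand-in for ART's ArenaAllocator/ScopedArenaAllocator; the
// AllocArray<T> body matches the template the patch adds.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocDFInfo };

class ToyArenaAllocator {
 public:
  // Returns zeroed memory, like the real Alloc(); the kind tag is what ART
  // uses for per-category allocation statistics.
  void* Alloc(std::size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) {
    static_cast<void>(kind);  // no stats in the toy version
    blocks_.emplace_back(new char[bytes]());  // ()-init -> zero-filled block
    return blocks_.back().get();
  }

  // The helper this commit adds: the element type is stated once, and the
  // allocator does the sizeof arithmetic and the cast.
  template <typename T>
  T* AllocArray(std::size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
    return static_cast<T*>(Alloc(length * sizeof(T), kind));
  }

 private:
  std::vector<std::unique_ptr<char[]>> blocks_;  // freed when the arena dies
};

int main() {
  ToyArenaAllocator arena;

  // Old call shape, as removed throughout this diff:
  int32_t* old_style = static_cast<int32_t*>(
      arena.Alloc(4 * sizeof(int32_t), kArenaAllocDFInfo));

  // New call shape, as added throughout this diff:
  int32_t* new_style = arena.AllocArray<int32_t>(4, kArenaAllocDFInfo);

  old_style[0] = 7;
  new_style[0] = old_style[0];
  std::cout << new_style[0] << "\n";  // prints 7
  return 0;
}

The helper buys no speed (it inlines to the identical cast); it removes the repeated sizeof/cast boilerplate where element-type mismatches tend to hide.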
-rw-r--r--  compiler/dex/global_value_numbering.h       |  3
-rw-r--r--  compiler/dex/global_value_numbering_test.cc |  6
-rw-r--r--  compiler/dex/local_value_numbering_test.cc  |  2
-rw-r--r--  compiler/dex/mir_analysis.cc                | 10
-rw-r--r--  compiler/dex/mir_dataflow.cc                | 21
-rw-r--r--  compiler/dex/mir_graph.cc                   |  8
-rw-r--r--  compiler/dex/mir_graph.h                    |  5
-rw-r--r--  compiler/dex/mir_optimization.cc            | 32
-rw-r--r--  compiler/dex/mir_optimization_test.cc       |  2
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc            |  4
-rw-r--r--  compiler/dex/quick/mir_to_lir.h             |  2
-rw-r--r--  compiler/dex/quick/ralloc_util.cc           | 11
-rw-r--r--  compiler/dex/ssa_transformation.cc          | 17
-rw-r--r--  compiler/dex/vreg_analysis.cc               |  3
-rw-r--r--  compiler/utils/arena_allocator.h            |  7
-rw-r--r--  compiler/utils/arena_containers.h           |  3
-rw-r--r--  compiler/utils/growable_array.h             |  9
-rw-r--r--  compiler/utils/scoped_arena_allocator.h     |  7
18 files changed, 63 insertions(+), 89 deletions(-)
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index cdafc68..023dbdb 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -271,8 +271,7 @@ template <typename Container> // Container of MirIFieldLoweringInfo or MirSFiel
uint16_t* GlobalValueNumbering::PrepareGvnFieldIds(ScopedArenaAllocator* allocator,
const Container& field_infos) {
size_t size = field_infos.size();
- uint16_t* field_ids = reinterpret_cast<uint16_t*>(allocator->Alloc(size * sizeof(uint16_t),
- kArenaAllocMisc));
+ uint16_t* field_ids = allocator->AllocArray<uint16_t>(size, kArenaAllocMisc);
for (size_t i = 0u; i != size; ++i) {
size_t idx = i;
const MirFieldInfo& cur_info = field_infos[i];
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index f71b7ae..cfa6388 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -229,7 +229,7 @@ class GlobalValueNumberingTest : public testing::Test {
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
- mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
ssa_reps_.resize(count);
for (size_t i = 0u; i != count; ++i) {
const MIRDef* def = &defs[i];
@@ -251,8 +251,8 @@ class GlobalValueNumberingTest : public testing::Test {
ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
SGetOrSPutMemAccessType(def->opcode));
} else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
- mir->meta.phi_incoming = static_cast<BasicBlockId*>(
- allocator_->Alloc(def->num_uses * sizeof(BasicBlockId), kArenaAllocDFInfo));
+ mir->meta.phi_incoming =
+ allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
ASSERT_EQ(def->num_uses, bb->predecessors.size());
std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
}
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index c894892..9f18a3e 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -136,7 +136,7 @@ class LocalValueNumberingTest : public testing::Test {
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
- mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
ssa_reps_.resize(count);
for (size_t i = 0u; i != count; ++i) {
const MIRDef* def = &defs[i];
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 473196b..cc16dc4 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1206,10 +1206,8 @@ void MIRGraph::DoCacheFieldLoweringInfo() {
// All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
ScopedArenaAllocator allocator(&cu_->arena_stack);
- uint16_t* field_idxs =
- reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));
- DexMemAccessType* field_types = reinterpret_cast<DexMemAccessType*>(
- allocator.Alloc(max_refs * sizeof(DexMemAccessType), kArenaAllocMisc));
+ uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
+ DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);
// Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
size_t ifield_pos = 0u;
@@ -1328,8 +1326,8 @@ void MIRGraph::DoCacheMethodLoweringInfo() {
// multi_index_container with one ordered index and one sequential index.
ScopedArenaSet<MapEntry, MapEntryComparator> invoke_map(MapEntryComparator(),
allocator.Adapter());
- const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
- allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));
+ const MapEntry** sequential_entries =
+ allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
// Find INVOKE insns and their devirtualization targets.
AllNodesIterator iter(this);
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index a1f4294..1f56276 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1084,9 +1084,9 @@ void MIRGraph::AllocateSSAUseData(MIR *mir, int num_uses) {
mir->ssa_rep->num_uses = num_uses;
if (mir->ssa_rep->num_uses_allocated < num_uses) {
- mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, kArenaAllocDFInfo));
+ mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
// NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
+ mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
}
}
@@ -1094,10 +1094,8 @@ void MIRGraph::AllocateSSADefData(MIR *mir, int num_defs) {
mir->ssa_rep->num_defs = num_defs;
if (mir->ssa_rep->num_defs_allocated < num_defs) {
- mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
- kArenaAllocDFInfo));
- mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
- kArenaAllocDFInfo));
+ mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
+ mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
}
}
@@ -1334,8 +1332,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
* predecessor blocks.
*/
bb->data_flow_info->vreg_to_ssa_map_exit =
- static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumOfCodeAndTempVRs(),
- kArenaAllocDFInfo));
+ arena_->AllocArray<int32_t>(GetNumOfCodeAndTempVRs(), kArenaAllocDFInfo);
memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
sizeof(int) * GetNumOfCodeAndTempVRs());
@@ -1387,13 +1384,9 @@ void MIRGraph::CompilerInitializeSSAConversion() {
* Initialize the DalvikToSSAMap map. There is one entry for each
* Dalvik register, and the SSA names for those are the same.
*/
- vreg_to_ssa_map_ =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
- kArenaAllocDFInfo));
+ vreg_to_ssa_map_ = arena_->AllocArray<int32_t>(num_reg, kArenaAllocDFInfo);
/* Keep track of the higest def for each dalvik reg */
- ssa_last_defs_ =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
- kArenaAllocDFInfo));
+ ssa_last_defs_ = arena_->AllocArray<int>(num_reg, kArenaAllocDFInfo);
for (unsigned int i = 0; i < num_reg; i++) {
vreg_to_ssa_map_[i] = i;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 92f960e..9f98589 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1599,7 +1599,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
}
}
int length = str.length() + 1;
- ret = static_cast<char*>(arena_->Alloc(length, kArenaAllocDFInfo));
+ ret = arena_->AllocArray<char>(length, kArenaAllocDFInfo);
strncpy(ret, str.c_str(), length);
return ret;
}
@@ -1736,8 +1736,8 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
info->num_arg_words = mir->ssa_rep->num_uses;
- info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
- (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, kArenaAllocMisc));
+ info->args = (info->num_arg_words == 0) ? nullptr :
+ arena_->AllocArray<RegLocation>(info->num_arg_words, kArenaAllocMisc);
for (int i = 0; i < info->num_arg_words; i++) {
info->args[i] = GetRawSrc(mir, i);
}
@@ -1768,7 +1768,7 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
void MIRGraph::InitializeConstantPropagation() {
is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
- constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(), kArenaAllocDFInfo));
+ constant_values_ = arena_->AllocArray<int>(GetNumSSARegs(), kArenaAllocDFInfo);
}
void MIRGraph::InitializeMethodUses() {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 27dca65..c33825b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -626,8 +626,7 @@ class MIRGraph {
}
void EnableOpcodeCounting() {
- opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
- kArenaAllocMisc));
+ opcode_count_ = arena_->AllocArray<int>(kNumPackedOpcodes, kArenaAllocMisc);
}
void ShowOpcodeStats();
@@ -1324,7 +1323,7 @@ class MIRGraph {
ArenaVector<int> ssa_base_vregs_;
ArenaVector<int> ssa_subscripts_;
// Map original Dalvik virtual reg i to the current SSA name.
- int* vreg_to_ssa_map_; // length == method->registers_size
+ int32_t* vreg_to_ssa_map_; // length == method->registers_size
int* ssa_last_defs_; // length == method->registers_size
ArenaBitVector* is_constant_v_; // length == num_ssa_reg
int* constant_values_; // length == num_ssa_reg
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8718191..dac0210 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -632,8 +632,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
} else {
DCHECK_EQ(SelectKind(if_true), kSelectMove);
DCHECK_EQ(SelectKind(if_false), kSelectMove);
- int* src_ssa =
- static_cast<int*>(arena_->Alloc(sizeof(int) * 3, kArenaAllocDFInfo));
+ int32_t* src_ssa = arena_->AllocArray<int32_t>(3, kArenaAllocDFInfo);
src_ssa[0] = mir->ssa_rep->uses[0];
src_ssa[1] = if_true->ssa_rep->uses[0];
src_ssa[2] = if_false->ssa_rep->uses[0];
@@ -641,15 +640,12 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
mir->ssa_rep->num_uses = 3;
}
mir->ssa_rep->num_defs = 1;
- mir->ssa_rep->defs =
- static_cast<int*>(arena_->Alloc(sizeof(int) * 1, kArenaAllocDFInfo));
- mir->ssa_rep->fp_def =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, kArenaAllocDFInfo));
+ mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
+ mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
// Match type of uses to def.
- mir->ssa_rep->fp_use =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
- kArenaAllocDFInfo));
+ mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
+ kArenaAllocDFInfo);
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
}
@@ -900,8 +896,8 @@ bool MIRGraph::EliminateNullChecksGate() {
temp_.nce.num_vregs = GetNumOfCodeAndTempVRs();
temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
- temp_.nce.ending_vregs_to_check_matrix = static_cast<ArenaBitVector**>(
- temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ temp_.nce.ending_vregs_to_check_matrix =
+ temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
// reset MIR_MARK
@@ -1133,8 +1129,7 @@ bool MIRGraph::EliminateClassInitChecksGate() {
// Each insn we use here has at least 2 code units, offset/2 will be a unique index.
const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
- temp_.cice.indexes = static_cast<uint16_t*>(
- temp_scoped_alloc_->Alloc(end * sizeof(*temp_.cice.indexes), kArenaAllocGrowableArray));
+ temp_.cice.indexes = temp_scoped_alloc_->AllocArray<uint16_t>(end, kArenaAllocGrowableArray);
std::fill_n(temp_.cice.indexes, end, 0xffffu);
uint32_t unique_class_count = 0u;
@@ -1215,8 +1210,8 @@ bool MIRGraph::EliminateClassInitChecksGate() {
temp_.cice.num_class_bits = 2u * unique_class_count;
temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
- temp_.cice.ending_classes_to_check_matrix = static_cast<ArenaBitVector**>(
- temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ temp_.cice.ending_classes_to_check_matrix =
+ temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
DCHECK_GT(temp_.cice.num_class_bits, 0u);
return true;
@@ -1441,8 +1436,8 @@ void MIRGraph::InlineSpecialMethodsStart() {
temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
temp_.smi.processed_indexes->ClearAllBits();
- temp_.smi.lowering_infos = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
- temp_.smi.num_indexes * sizeof(*temp_.smi.lowering_infos), kArenaAllocGrowableArray));
+ temp_.smi.lowering_infos =
+ temp_scoped_alloc_->AllocArray<uint16_t>(temp_.smi.num_indexes, kArenaAllocGrowableArray);
}
void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
@@ -1603,8 +1598,7 @@ bool MIRGraph::EliminateSuspendChecksGate() {
temp_.sce.inliner =
cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
}
- suspend_checks_in_loops_ = static_cast<uint32_t*>(
- arena_->Alloc(GetNumBlocks() * sizeof(*suspend_checks_in_loops_), kArenaAllocMisc));
+ suspend_checks_in_loops_ = arena_->AllocArray<uint32_t>(GetNumBlocks(), kArenaAllocMisc);
return true;
}
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 199bc27..be05b80 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -280,7 +280,7 @@ class MirOptimizationTest : public testing::Test {
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
- mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
uint64_t merged_df_flags = 0u;
for (size_t i = 0u; i != count; ++i) {
const MIRDef* def = &defs[i];
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 274e078..9f6d8af 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1195,9 +1195,7 @@ void Mir2Lir::MethodMIR2LIR() {
cu_->NewTimingSplit("MIR2LIR");
// Hold the labels of each block.
- block_label_list_ =
- static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
- kArenaAllocLIR));
+ block_label_list_ = arena_->AllocArray<LIR>(mir_graph_->GetNumBlocks(), kArenaAllocLIR);
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9f1a497..88ca911 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -592,7 +592,7 @@ class Mir2Lir {
// strdup(), but allocates from the arena.
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
- char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
+ char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
if (res != NULL) {
strncpy(res, str, len);
}
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 8efafb2..67fb804 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1191,8 +1191,7 @@ void Mir2Lir::DoPromotion() {
int num_regs = mir_graph_->GetNumOfCodeAndTempVRs();
const int promotion_threshold = 1;
// Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
- promotion_map_ = static_cast<PromotionMap*>
- (arena_->Alloc(num_regs * sizeof(promotion_map_[0]), kArenaAllocRegAlloc));
+ promotion_map_ = arena_->AllocArray<PromotionMap>(num_regs, kArenaAllocRegAlloc);
// Allow target code to add any special registers
AdjustSpillMask();
@@ -1210,12 +1209,8 @@ void Mir2Lir::DoPromotion() {
*/
size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
- RefCounts *core_regs =
- static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * core_reg_count_size,
- kArenaAllocRegAlloc));
- RefCounts *fp_regs =
- static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * fp_reg_count_size,
- kArenaAllocRegAlloc));
+ RefCounts *core_regs = arena_->AllocArray<RefCounts>(core_reg_count_size, kArenaAllocRegAlloc);
+ RefCounts *fp_regs = arena_->AllocArray<RefCounts>(fp_reg_count_size, kArenaAllocRegAlloc);
// Set ssa names for original Dalvik registers
for (int i = 0; i < num_regs; i++) {
core_regs[i].s_reg = fp_regs[i].s_reg = i;
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index f15f9be..fcea77c 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -137,8 +137,8 @@ void MIRGraph::ComputeDefBlockMatrix() {
/* Allocate num_registers bit vector pointers */
DCHECK(temp_scoped_alloc_ != nullptr);
DCHECK(temp_.ssa.def_block_matrix == nullptr);
- temp_.ssa.def_block_matrix = static_cast<ArenaBitVector**>(
- temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
+ temp_.ssa.def_block_matrix =
+ temp_scoped_alloc_->AllocArray<ArenaBitVector*>(num_registers, kArenaAllocDFInfo);
int i;
/* Initialize num_register vectors with num_blocks bits each */
@@ -363,8 +363,7 @@ void MIRGraph::ComputeDominators() {
/* Initialize & Clear i_dom_list */
if (max_num_reachable_blocks_ < num_reachable_blocks_) {
- i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
- kArenaAllocDFInfo));
+ i_dom_list_ = arena_->AllocArray<int>(num_reachable_blocks, kArenaAllocDFInfo);
}
for (int i = 0; i < num_reachable_blocks; i++) {
i_dom_list_[i] = NOTVISITED;
@@ -517,9 +516,7 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
size_t num_uses = bb->predecessors.size();
AllocateSSAUseData(mir, num_uses);
int* uses = mir->ssa_rep->uses;
- BasicBlockId* incoming =
- static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
- kArenaAllocDFInfo));
+ BasicBlockId* incoming = arena_->AllocArray<BasicBlockId>(num_uses, kArenaAllocDFInfo);
mir->meta.phi_incoming = incoming;
int idx = 0;
for (BasicBlockId pred_id : bb->predecessors) {
@@ -542,12 +539,12 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
/* Process this block */
DoSSAConversion(block);
- int map_size = sizeof(int) * GetNumOfCodeAndTempVRs();
/* Save SSA map snapshot */
ScopedArenaAllocator allocator(&cu_->arena_stack);
- int* saved_ssa_map =
- static_cast<int*>(allocator.Alloc(map_size, kArenaAllocDalvikToSSAMap));
+ uint32_t num_vregs = GetNumOfCodeAndTempVRs();
+ int32_t* saved_ssa_map = allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
+ size_t map_size = sizeof(saved_ssa_map[0]) * num_vregs;
memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
if (block->fall_through != NullBasicBlockId) {
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f70850a..b620969 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -440,8 +440,7 @@ void MIRGraph::InitRegLocations() {
// the temp allocation initializes reg location as well (in order to deal with
// case when it will be called after this pass).
int max_regs = GetNumSSARegs() + GetMaxPossibleCompilerTemps();
- RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(max_regs * sizeof(*loc),
- kArenaAllocRegAlloc));
+ RegLocation* loc = arena_->AllocArray<RegLocation>(max_regs, kArenaAllocRegAlloc);
for (int i = 0; i < GetNumSSARegs(); i++) {
loc[i] = fresh_loc;
loc[i].s_reg_low = i;
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 7f5bc9a..e730fd7 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -176,7 +176,7 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
ArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Returns zeroed memory.
- void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
if (UNLIKELY(running_on_valgrind_)) {
return AllocValgrind(bytes, kind);
}
@@ -194,8 +194,9 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
return ret;
}
- template <typename T> T* AllocArray(size_t length) {
- return static_cast<T*>(Alloc(length * sizeof(T), kArenaAllocMisc));
+ template <typename T>
+ T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
+ return static_cast<T*>(Alloc(length * sizeof(T), kind));
}
void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index 8252591..a7a7438 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -161,8 +161,7 @@ class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocato
pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
UNUSED(hint);
DCHECK_LE(n, max_size());
- return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
- ArenaAllocatorAdapterKind::Kind()));
+ return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
void deallocate(pointer p, size_type n) {
UNUSED(p, n);
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 6af4853..fd43ea6 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -33,16 +33,14 @@ class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
: arena_(arena),
num_allocated_(init_length),
num_used_(0) {
- elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
- kArenaAllocGrowableArray));
+ elem_list_ = arena_->AllocArray<T>(init_length, kArenaAllocGrowableArray);
}
GrowableArray(ArenaAllocator* arena, size_t init_length, T initial_data)
: arena_(arena),
num_allocated_(init_length),
num_used_(init_length) {
- elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
- kArenaAllocGrowableArray));
+ elem_list_ = arena_->AllocArray<T>(init_length, kArenaAllocGrowableArray);
for (size_t i = 0; i < init_length; ++i) {
elem_list_[i] = initial_data;
}
@@ -58,8 +56,7 @@ class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
if (new_length > target_length) {
target_length = new_length;
}
- T* new_array = static_cast<T*>(arena_->Alloc(sizeof(T) * target_length,
- kArenaAllocGrowableArray));
+ T* new_array = arena_->AllocArray<T>(target_length, kArenaAllocGrowableArray);
memcpy(new_array, elem_list_, sizeof(T) * num_allocated_);
num_allocated_ = target_length;
elem_list_ = new_array;
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 523f158..c46acbc 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -115,11 +115,16 @@ class ScopedArenaAllocator
void Reset();
- void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
DebugStackReference::CheckTop();
return arena_stack_->Alloc(bytes, kind);
}
+ template <typename T>
+ T* AllocArray(size_t length, ArenaAllocKind kind = kArenaAllocMisc) {
+ return static_cast<T*>(Alloc(length * sizeof(T), kind));
+ }
+
// Get adapter for use in STL containers. See scoped_arena_containers.h .
ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
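One contract worth noting from the hunks above: Alloc is documented as returning zeroed memory, so AllocArray<T> hands back zero-filled arrays, and call sites that want a non-zero sentinel (such as temp_.cice.indexes in mir_optimization.cc) still std::fill_n afterwards. A small hedged sketch of that behavior, with a hypothetical AllocArrayZeroed helper standing in for the arena:

// Hedged follow-up sketch: the arena returns zeroed memory, so a typed array
// helper yields zero-filled arrays; callers needing a non-zero sentinel still
// std::fill_n afterwards (as mir_optimization.cc keeps doing with 0xffffu).
// AllocArrayZeroed is a hypothetical helper, not ART code.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

template <typename T>
T* AllocArrayZeroed(std::vector<std::unique_ptr<char[]>>& blocks,
                    std::size_t length) {
  blocks.emplace_back(new char[length * sizeof(T)]());  // ()-init -> zeroed
  return reinterpret_cast<T*>(blocks.back().get());
}

int main() {
  std::vector<std::unique_ptr<char[]>> blocks;  // stands in for the arena
  const std::size_t end = 8;
  uint16_t* indexes = AllocArrayZeroed<uint16_t>(blocks, end);
  assert(indexes[0] == 0u);             // zeroed, per the Alloc contract
  std::fill_n(indexes, end, 0xffffu);   // sentinel fill, as in the patch
  assert(indexes[end - 1] == 0xffffu);
  return 0;
}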