author     Nicolas Geoffray <ngeoffray@google.com>   2015-02-03 15:12:35 +0000
committer  Nicolas Geoffray <ngeoffray@google.com>   2015-02-03 17:13:17 +0000
commit     4c204bafbc8d596894f8cb8ec696f5be1c6f12d8 (patch)
tree       3608d188815a8a80e86f98611edcfe3bbaad8b17
parent     08029544d72bd9bec162956978afcb59204ea97b (diff)
Use a different block order when not compiling baseline.
Use the linearized order instead, as it places logically related blocks next to each other. It also contains no dead blocks.

Change-Id: Ie65b56041a093c8155e6c1e06351cb36a4053505
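For intuition, here is a minimal sketch of why the linearized order helps, using simplified hypothetical stand-ins rather than ART's HBasicBlock and GrowableArray types: insertion order visits blocks in creation order, including dead ones, while a linearized order drops dead blocks and keeps likely fall-through successors adjacent.

    #include <cstdio>
    #include <vector>

    // Simplified stand-in for HBasicBlock; 'id' is the insertion-order id.
    struct Block { int id; bool dead; };

    void Emit(const Block& b) { std::printf("block %d\n", b.id); }

    // Insertion order: every created block, dead or not, in creation order.
    void CompileInsertionOrder(const std::vector<Block>& blocks) {
      for (const Block& b : blocks) Emit(b);  // dead blocks are emitted too
    }

    // Linearized order: a scheduling pass has already dropped dead blocks
    // and placed likely fall-through successors next to each other.
    void CompileLinearOrder(const std::vector<const Block*>& linear_order) {
      for (const Block* b : linear_order) Emit(*b);
    }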
-rw-r--r--  compiler/optimizing/code_generator.cc      | 90
-rw-r--r--  compiler/optimizing/code_generator.h       | 21
-rw-r--r--  compiler/optimizing/register_allocator.cc  |  8
3 files changed, 63 insertions(+), 56 deletions(-)
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0a405c4..dc2446d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -41,60 +41,57 @@ size_t CodeGenerator::GetCacheOffset(uint32_t index) {
}
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
- const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
- DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
- DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
- Initialize();
-
DCHECK_EQ(frame_size_, kUninitializedFrameSize);
+
+ Initialize();
if (!is_leaf) {
MarkNotLeaf();
}
- ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetTemporariesVRegSlots()
- + 1 /* filler */,
- 0, /* the baseline compiler does not have live registers at slow path */
- 0, /* the baseline compiler does not have live registers at slow path */
- GetGraph()->GetMaximumNumberOfOutVRegs()
- + 1 /* current method */);
- GenerateFrameEntry();
+ InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
+ + GetGraph()->GetTemporariesVRegSlots()
+ + 1 /* filler */,
+ 0, /* the baseline compiler does not have live registers at slow path */
+ 0, /* the baseline compiler does not have live registers at slow path */
+ GetGraph()->GetMaximumNumberOfOutVRegs()
+ + 1 /* current method */,
+ GetGraph()->GetBlocks());
+ CompileInternal(allocator, /* is_baseline */ true);
+}
+void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
HGraphVisitor* location_builder = GetLocationBuilder();
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
- for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
- HBasicBlock* block = blocks.Get(i);
+ DCHECK_EQ(current_block_index_, 0u);
+ GenerateFrameEntry();
+ for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
+ HBasicBlock* block = block_order_->Get(current_block_index_);
Bind(block);
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
- current->Accept(location_builder);
- InitLocations(current);
+ if (is_baseline) {
+ current->Accept(location_builder);
+ InitLocations(current);
+ }
current->Accept(instruction_visitor);
}
}
- GenerateSlowPaths();
+
+ // Generate the slow paths.
+ for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
+ slow_paths_.Get(i)->EmitNativeCode(this);
+ }
+
+ // Finalize instructions in assembler.
Finalize(allocator);
}
void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
- // The frame size has already been computed during register allocation.
+ // The register allocator already called `InitializeCodeGeneration`,
+ // where the frame size has been computed.
DCHECK_NE(frame_size_, kUninitializedFrameSize);
- const GrowableArray<HBasicBlock*>& blocks = GetGraph()->GetBlocks();
- DCHECK(blocks.Get(0) == GetGraph()->GetEntryBlock());
- DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), blocks.Get(1)));
+ DCHECK(block_order_ != nullptr);
Initialize();
-
- GenerateFrameEntry();
- HGraphVisitor* instruction_visitor = GetInstructionVisitor();
- for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
- HBasicBlock* block = blocks.Get(i);
- Bind(block);
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- current->Accept(instruction_visitor);
- }
- }
- GenerateSlowPaths();
- Finalize(allocator);
+ CompileInternal(allocator, /* is_baseline */ false);
}
void CodeGenerator::Finalize(CodeAllocator* allocator) {
@@ -105,12 +102,6 @@ void CodeGenerator::Finalize(CodeAllocator* allocator) {
GetAssembler()->FinalizeInstructions(code);
}
-void CodeGenerator::GenerateSlowPaths() {
- for (size_t i = 0, e = slow_paths_.Size(); i < e; ++i) {
- slow_paths_.Get(i)->EmitNativeCode(this);
- }
-}
-
size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
for (size_t i = 0; i < length; ++i) {
if (!array[i]) {
@@ -136,10 +127,14 @@ size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t l
return -1;
}
-void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
- size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fp_registers,
- size_t number_of_out_slots) {
+void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
+ size_t maximum_number_of_live_core_registers,
+ size_t maximum_number_of_live_fp_registers,
+ size_t number_of_out_slots,
+ const GrowableArray<HBasicBlock*>& block_order) {
+ block_order_ = &block_order;
+ DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
+ DCHECK(GoesToNextBlock(GetGraph()->GetEntryBlock(), block_order_->Get(1)));
ComputeSpillMask();
first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;
@@ -326,8 +321,9 @@ void CodeGenerator::InitLocations(HInstruction* instruction) {
}
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
- // We currently iterate over the block in insertion order.
- return current->GetBlockId() + 1 == next->GetBlockId();
+ DCHECK_EQ(block_order_->Get(current_block_index_), current);
+ return (current_block_index_ < block_order_->Size() - 1)
+ && (block_order_->Get(current_block_index_ + 1) == next);
}
CodeGenerator* CodeGenerator::Create(HGraph* graph,
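A note on the subtle part of this file: once blocks are emitted in an arbitrary order, being "the next block" is a property of block_order_, not of block IDs, which is why GoesToNextBlock changes above. A minimal sketch of the new adjacency test, with hypothetical names (integer ids standing in for HBasicBlock*):

    #include <cstddef>
    #include <vector>

    // block_order holds block ids in emission order. Under insertion order,
    // id adjacency implied emission adjacency; under a linearized order it
    // does not, so adjacency must be looked up in the order itself.
    bool GoesToNextBlock(const std::vector<int>& block_order,
                         size_t current_index, int next_id) {
      // `current_index + 1 < size()` avoids unsigned underflow on an empty
      // order and is equivalent to the patch's `< Size() - 1` check.
      return current_index + 1 < block_order.size()
          && block_order[current_index + 1] == next_id;
    }

When this returns true, the code generator can typically fall through instead of emitting an unconditional branch; maximizing such fall-throughs is the point of using the linearized order.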
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 45f02e5..ab63b91 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -113,10 +113,11 @@ class CodeGenerator {
virtual size_t GetWordSize() const = 0;
virtual size_t GetFloatingPointSpillSlotSize() const = 0;
virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
- void ComputeFrameSize(size_t number_of_spill_slots,
- size_t maximum_number_of_live_core_registers,
- size_t maximum_number_of_live_fp_registers,
- size_t number_of_out_slots);
+ void InitializeCodeGeneration(size_t number_of_spill_slots,
+ size_t maximum_number_of_live_core_registers,
+ size_t maximum_number_of_live_fp_registers,
+ size_t number_of_out_slots,
+ const GrowableArray<HBasicBlock*>& block_order);
int32_t GetStackSlot(HLocal* local) const;
Location GetTemporaryLocation(HTemporary* temp) const;
@@ -181,8 +182,6 @@ class CodeGenerator {
slow_paths_.Add(slow_path);
}
- void GenerateSlowPaths();
-
void BuildMappingTable(std::vector<uint8_t>* vector, DefaultSrcMap* src_map) const;
void BuildVMapTable(std::vector<uint8_t>* vector) const;
void BuildNativeGCMap(
@@ -253,6 +252,8 @@ class CodeGenerator {
compiler_options_(compiler_options),
pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
+ block_order_(nullptr),
+ current_block_index_(0),
is_leaf_(true),
stack_map_stream_(graph->GetArena()) {}
@@ -312,6 +313,7 @@ class CodeGenerator {
private:
void InitLocations(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
+ void CompileInternal(CodeAllocator* allocator, bool is_baseline);
HGraph* const graph_;
const CompilerOptions& compiler_options_;
@@ -319,6 +321,13 @@ class CodeGenerator {
GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
+ // The order to use for code generation.
+ const GrowableArray<HBasicBlock*>* block_order_;
+
+ // The current block index in `block_order_` of the block
+ // we are generating code for.
+ size_t current_block_index_;
+
bool is_leaf_;
StackMapStream stack_map_stream_;
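The two new fields encode a two-phase contract: the constructor leaves block_order_ unset, and InitializeCodeGeneration() must run (from CompileBaseline() or from the register allocator) before any code is generated. A minimal sketch of that contract, using a hypothetical class rather than ART's:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    class CodeGenSketch {
     public:
      CodeGenSketch() : block_order_(nullptr), current_block_index_(0) {}

      // Phase 1: record the emission order (borrowed; caller keeps it alive).
      void InitializeCodeGeneration(const std::vector<int>& block_order) {
        block_order_ = &block_order;
      }

      // Phase 2: walk the recorded order; current_block_index_ tracks progress
      // so helpers like GoesToNextBlock() can consult it mid-compilation.
      void Compile() {
        assert(block_order_ != nullptr && "call InitializeCodeGeneration first");
        for (; current_block_index_ < block_order_->size(); ++current_block_index_) {
          // Emit code for (*block_order_)[current_block_index_] here.
        }
      }

     private:
      const std::vector<int>* block_order_;
      size_t current_block_index_;
    };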
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6f8f688..0a3f24b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1459,9 +1459,11 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
}
void RegisterAllocator::Resolve() {
- codegen_->ComputeFrameSize(
- spill_slots_.Size(), maximum_number_of_live_core_registers_,
- maximum_number_of_live_fp_registers_, reserved_out_slots_);
+ codegen_->InitializeCodeGeneration(spill_slots_.Size(),
+ maximum_number_of_live_core_registers_,
+ maximum_number_of_live_fp_registers_,
+ reserved_out_slots_,
+ liveness_.GetLinearOrder());
// Adjust the Out Location of instructions.
// TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
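Putting the three files together, the resulting control flow can be reconstructed as follows (runnable stand-in stubs; only the call sequence, not the real signatures, is taken from the hunks above):

    #include <cstdio>
    #include <string>
    #include <vector>

    using BlockOrder = std::vector<std::string>;

    void InitializeCodeGeneration(const BlockOrder& order) {
      std::printf("init: %zu blocks\n", order.size());
    }

    void CompileInternal(bool is_baseline) {
      std::printf("compile (%s)\n", is_baseline ? "baseline" : "optimized");
    }

    int main() {
      // Baseline: CompileBaseline() itself initializes with the graph's
      // insertion order (GetGraph()->GetBlocks() in the patch).
      BlockOrder insertion_order = {"entry", "b1", "b2", "exit"};
      InitializeCodeGeneration(insertion_order);
      CompileInternal(/* is_baseline= */ true);

      // Optimized: RegisterAllocator::Resolve() initializes with the liveness
      // linear order (liveness_.GetLinearOrder()), after which
      // CompileOptimized() only checks the invariants and runs the shared loop.
      BlockOrder linear_order = {"entry", "b2", "b1", "exit"};
      InitializeCodeGeneration(linear_order);
      CompileInternal(/* is_baseline= */ false);
      return 0;
    }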