summary refs log tree commit diff stats
path: root/compiler/optimizing
diff options
context:
space:
mode:
author	Nicolas Geoffray <ngeoffray@google.com>	2014-03-19 10:34:11 +0000
committer	Nicolas Geoffray <ngeoffray@google.com>	2014-03-31 09:44:40 +0100
commit	8ccc3f5d06fd217cdaabd37e743adab2031d3720 (patch)
tree	ec8c904baafb4d9b9bfd582245e2d780bcdfaade /compiler/optimizing
parent	ad174d1b54bf2fa477bec71a0ca93595f54b8fe9 (diff)
download	art-8ccc3f5d06fd217cdaabd37e743adab2031d3720.zip
art-8ccc3f5d06fd217cdaabd37e743adab2031d3720.tar.gz
art-8ccc3f5d06fd217cdaabd37e743adab2031d3720.tar.bz2
Add support for invoke-static in optimizing compiler.
Support is limited to calls without parameters and returning void. For simplicity, we currently follow the Quick ABI. Change-Id: I54805161141b7eac5959f1cae0dc138dd0b2e8a5
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--compiler/optimizing/builder.cc19
-rw-r--r--compiler/optimizing/builder.h12
-rw-r--r--compiler/optimizing/code_generator.cc92
-rw-r--r--compiler/optimizing/code_generator.h40
-rw-r--r--compiler/optimizing/code_generator_arm.cc54
-rw-r--r--compiler/optimizing/code_generator_arm.h1
-rw-r--r--compiler/optimizing/code_generator_x86.cc56
-rw-r--r--compiler/optimizing/code_generator_x86.h2
-rw-r--r--compiler/optimizing/nodes.h37
-rw-r--r--compiler/optimizing/optimizing_compiler.cc18
10 files changed, 311 insertions, 20 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 39535e9..db77fee 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -16,10 +16,12 @@
*/
#include "dex_file.h"
+#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "builder.h"
#include "nodes.h"
+#include "primitive.h"
namespace art {
@@ -192,6 +194,23 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_
break;
}
+ case Instruction::INVOKE_STATIC: {
+ uint32_t method_idx = instruction.VRegB_35c();
+ const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+ uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+ const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ const size_t number_of_arguments = instruction.VRegA_35c();
+ if (number_of_arguments != 0) {
+ return false;
+ }
+ if (Primitive::GetType(descriptor[0]) != Primitive::kPrimVoid) {
+ return false;
+ }
+ current_block_->AddInstruction(new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, dex_offset, method_idx));
+ break;
+ }
+
case Instruction::NOP:
break;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index fff83a1..46ca9aa 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
#include "dex_file.h"
+#include "driver/dex_compilation_unit.h"
#include "utils/allocation.h"
#include "utils/growable_array.h"
@@ -33,7 +34,9 @@ class HLocal;
class HGraphBuilder : public ValueObject {
public:
- explicit HGraphBuilder(ArenaAllocator* arena)
+ HGraphBuilder(ArenaAllocator* arena,
+ const DexCompilationUnit* dex_compilation_unit = nullptr,
+ const DexFile* dex_file = nullptr)
: arena_(arena),
branch_targets_(arena, 0),
locals_(arena, 0),
@@ -42,7 +45,9 @@ class HGraphBuilder : public ValueObject {
current_block_(nullptr),
graph_(nullptr),
constant0_(nullptr),
- constant1_(nullptr) { }
+ constant1_(nullptr),
+ dex_file_(dex_file),
+ dex_compilation_unit_(dex_compilation_unit) { }
HGraph* BuildGraph(const DexFile::CodeItem& code);
@@ -83,6 +88,9 @@ class HGraphBuilder : public ValueObject {
HIntConstant* constant0_;
HIntConstant* constant1_;
+ const DexFile* const dex_file_;
+ const DexCompilationUnit* const dex_compilation_unit_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index bb6ac84..b86665b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -21,8 +21,11 @@
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
+#include "leb128.h"
+#include "mapping_table.h"
#include "utils/assembler.h"
#include "verifier/dex_gc_map.h"
+#include "vmap_table.h"
namespace art {
@@ -120,8 +123,95 @@ void CodeGenerator::BuildNativeGCMap(
dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
- GcMapBuilder builder(data, 0, 0, dex_gc_map.RegWidth());
+ uint32_t max_native_offset = 0;
+ for (size_t i = 0; i < pc_infos_.Size(); i++) {
+ uint32_t native_offset = pc_infos_.Get(i).native_pc;
+ if (native_offset > max_native_offset) {
+ max_native_offset = native_offset;
+ }
+ }
+
+ GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
+ for (size_t i = 0; i < pc_infos_.Size(); i++) {
+ struct PcInfo pc_info = pc_infos_.Get(i);
+ uint32_t native_offset = pc_info.native_pc;
+ uint32_t dex_pc = pc_info.dex_pc;
+ const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
+ CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+ builder.AddEntry(native_offset, references);
+ }
}
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+ uint32_t pc2dex_data_size = 0u;
+ uint32_t pc2dex_entries = pc_infos_.Size();
+ uint32_t pc2dex_offset = 0u;
+ int32_t pc2dex_dalvik_offset = 0;
+ uint32_t dex2pc_data_size = 0u;
+ uint32_t dex2pc_entries = 0u;
+
+ // We currently only have pc2dex entries.
+ for (size_t i = 0; i < pc2dex_entries; i++) {
+ struct PcInfo pc_info = pc_infos_.Get(i);
+ pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
+ pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = pc_info.native_pc;
+ pc2dex_dalvik_offset = pc_info.dex_pc;
+ }
+
+ uint32_t total_entries = pc2dex_entries + dex2pc_entries;
+ uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
+ uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
+ data->resize(data_size);
+
+ uint8_t* data_ptr = &(*data)[0];
+ uint8_t* write_pos = data_ptr;
+ write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
+ write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
+ DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
+ uint8_t* write_pos2 = write_pos + pc2dex_data_size;
+
+ pc2dex_offset = 0u;
+ pc2dex_dalvik_offset = 0u;
+ for (size_t i = 0; i < pc2dex_entries; i++) {
+ struct PcInfo pc_info = pc_infos_.Get(i);
+ DCHECK(pc2dex_offset <= pc_info.native_pc);
+ write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
+ write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = pc_info.native_pc;
+ pc2dex_dalvik_offset = pc_info.dex_pc;
+ }
+ DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
+ DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
+
+ if (kIsDebugBuild) {
+ // Verify the encoded table holds the expected data.
+ MappingTable table(data_ptr);
+ CHECK_EQ(table.TotalSize(), total_entries);
+ CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
+ auto it = table.PcToDexBegin();
+ auto it2 = table.DexToPcBegin();
+ for (size_t i = 0; i < pc2dex_entries; i++) {
+ struct PcInfo pc_info = pc_infos_.Get(i);
+ CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
+ CHECK_EQ(pc_info.dex_pc, it.DexPc());
+ ++it;
+ }
+ CHECK(it == table.PcToDexEnd());
+ CHECK(it2 == table.DexToPcEnd());
+ }
+}
+
+void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
+ Leb128EncodingVector vmap_encoder;
+ size_t size = 1 + 1 /* marker */ + 0;
+ vmap_encoder.Reserve(size + 1u); // All values are likely to be one byte in ULEB128 (<128).
+ vmap_encoder.PushBackUnsigned(size);
+ // We're currently always saving the frame pointer, so set it in the table as a temporary.
+ vmap_encoder.PushBackUnsigned(kVRegTempBaseReg + VmapTable::kEntryAdjustment);
+ vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
+
+ *data = vmap_encoder.GetData();
+}
} // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 63f8cbf..24dcab6 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -38,6 +38,11 @@ class CodeAllocator {
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
+struct PcInfo {
+ uint32_t dex_pc;
+ uintptr_t native_pc;
+};
+
/**
* A Location is an abstraction over the potential location
* of an instruction. It could be in register or stack.
@@ -81,7 +86,8 @@ class Location : public ValueObject {
class LocationSummary : public ArenaObject {
public:
explicit LocationSummary(HInstruction* instruction)
- : inputs(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()) {
+ : inputs(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
+ temps(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
inputs.SetSize(instruction->InputCount());
for (int i = 0; i < instruction->InputCount(); i++) {
inputs.Put(i, Location());
@@ -100,10 +106,19 @@ class LocationSummary : public ArenaObject {
output = Location(location);
}
+ void AddTemp(Location location) {
+ temps.Add(location);
+ }
+
+ Location GetTemp(uint32_t at) const {
+ return temps.Get(at);
+ }
+
Location Out() const { return output; }
private:
GrowableArray<Location> inputs;
+ GrowableArray<Location> temps;
Location output;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
@@ -134,9 +149,17 @@ class CodeGenerator : public ArenaObject {
uint32_t GetFrameSize() const { return frame_size_; }
void SetFrameSize(uint32_t size) { frame_size_ = size; }
+ uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
+
+ void RecordPcInfo(uint32_t dex_pc) {
+ struct PcInfo pc_info;
+ pc_info.dex_pc = dex_pc;
+ pc_info.native_pc = GetAssembler()->CodeSize();
+ pc_infos_.Add(pc_info);
+ }
- void BuildMappingTable(std::vector<uint8_t>* vector) const { }
- void BuildVMapTable(std::vector<uint8_t>* vector) const { }
+ void BuildMappingTable(std::vector<uint8_t>* vector) const;
+ void BuildVMapTable(std::vector<uint8_t>* vector) const;
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
@@ -144,23 +167,26 @@ class CodeGenerator : public ArenaObject {
explicit CodeGenerator(HGraph* graph)
: frame_size_(0),
graph_(graph),
- block_labels_(graph->GetArena(), 0) {
+ block_labels_(graph->GetArena(), 0),
+ pc_infos_(graph->GetArena(), 32) {
block_labels_.SetSize(graph->GetBlocks()->Size());
}
~CodeGenerator() { }
+ // Frame size required for this method.
+ uint32_t frame_size_;
+ uint32_t core_spill_mask_;
+
private:
void InitLocations(HInstruction* instruction);
void CompileBlock(HBasicBlock* block);
void CompileEntryBlock();
- // Frame size required for this method.
- uint32_t frame_size_;
-
HGraph* const graph_;
// Labels for each block that will be compiled.
GrowableArray<Label> block_labels_;
+ GrowableArray<PcInfo> pc_infos_;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 04bdc34..c85d67d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -18,17 +18,27 @@
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
+#include "mirror/array.h"
+#include "mirror/art_method.h"
+
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
namespace art {
namespace arm {
void CodeGeneratorARM::GenerateFrameEntry() {
+ core_spill_mask_ |= (1 << LR);
+ // We're currently always using FP, which is callee-saved in Quick.
+ core_spill_mask_ |= (1 << FP);
+
__ PushList((1 << FP) | (1 << LR));
__ mov(FP, ShifterOperand(SP));
- if (GetFrameSize() != 0) {
- __ AddConstant(SP, -GetFrameSize());
- }
+
+ // Add the current ART method to the frame size, the return pc, and FP.
+ SetFrameSize(RoundUp(GetFrameSize() + 3 * kWordSize, kStackAlignment));
+ // PC and FP have already been pushed on the stack.
+ __ AddConstant(SP, -(GetFrameSize() - 2 * kWordSize));
+ __ str(R0, Address(SP, 0));
}
void CodeGeneratorARM::GenerateFrameExit() {
@@ -173,5 +183,43 @@ void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
codegen_->GenerateFrameExit();
}
+void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+ CHECK_EQ(invoke->InputCount(), 0);
+ locations->AddTemp(Location(R0));
+ invoke->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+ __ ldr(reg, Address(SP, 0));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+ size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ invoke->GetIndexInDexCache() * kWordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ ldr(temp, Address(temp, index_in_cache));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ ldr(LR, Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ // LR()
+ __ blx(LR);
+
+ codegen_->RecordPcInfo(invoke->GetDexPc());
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 52a7bf4..7a2835d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -58,6 +58,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
Assembler* GetAssembler() const { return assembler_; }
+ void LoadCurrentMethod(Register reg);
private:
Assembler* const assembler_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c4bda56..54bff0c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -18,18 +18,28 @@
#include "utils/assembler.h"
#include "utils/x86/assembler_x86.h"
+#include "mirror/array.h"
+#include "mirror/art_method.h"
+
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
namespace art {
namespace x86 {
void CodeGeneratorX86::GenerateFrameEntry() {
+ // Create a fake register to mimic Quick.
+ static const int kFakeReturnRegister = 8;
+ core_spill_mask_ |= (1 << kFakeReturnRegister);
+ // We're currently always using EBP, which is callee-saved in Quick.
+ core_spill_mask_ |= (1 << EBP);
+
__ pushl(EBP);
__ movl(EBP, ESP);
-
- if (GetFrameSize() != 0) {
- __ subl(ESP, Immediate(GetFrameSize()));
- }
+ // Add the current ART method to the frame size, the return pc, and EBP.
+ SetFrameSize(RoundUp(GetFrameSize() + 3 * kWordSize, kStackAlignment));
+ // The PC and EBP have already been pushed on the stack.
+ __ subl(ESP, Immediate(GetFrameSize() - 2 * kWordSize));
+ __ movl(Address(ESP, 0), EAX);
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -45,6 +55,10 @@ void CodeGeneratorX86::Push(HInstruction* instruction, Location location) {
__ pushl(location.reg<Register>());
}
+void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
+ __ movl(reg, Address(ESP, 0));
+}
+
void CodeGeneratorX86::Move(HInstruction* instruction, Location location) {
HIntConstant* constant = instruction->AsIntConstant();
if (constant != nullptr) {
@@ -110,7 +124,8 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
static int32_t GetStackSlot(HLocal* local) {
// We are currently using EBP to access locals, so the offset must be negative.
- return (local->GetRegNumber() + 1) * -kWordSize;
+ // +1 for going backwards, +1 for the method pointer.
+ return (local->GetRegNumber() + 2) * -kWordSize;
}
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
@@ -172,5 +187,36 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
__ ret();
}
+void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+ CHECK_EQ(invoke->InputCount(), 0);
+ locations->AddTemp(Location(EAX));
+ invoke->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+ size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ invoke->GetIndexInDexCache() * kWordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+ codegen_->RecordPcInfo(invoke->GetDexPc());
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ad2a061..505237b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -57,6 +57,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ void LoadCurrentMethod(Register reg);
+
Assembler* GetAssembler() const { return assembler_; }
private:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e74ed82..50d5c59 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -185,6 +185,7 @@ class HBasicBlock : public ArenaObject {
M(Goto) \
M(If) \
M(IntConstant) \
+ M(InvokeStatic) \
M(LoadLocal) \
M(Local) \
M(Return) \
@@ -554,6 +555,42 @@ class HIntConstant : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
+class HInvoke : public HInstruction {
+ public:
+ HInvoke(ArenaAllocator* arena, uint32_t number_of_arguments, int32_t dex_pc)
+ : inputs_(arena, number_of_arguments),
+ dex_pc_(dex_pc) {
+ inputs_.SetSize(number_of_arguments);
+ }
+
+ virtual intptr_t InputCount() const { return inputs_.Size(); }
+ virtual HInstruction* InputAt(intptr_t i) const { return inputs_.Get(i); }
+
+ int32_t GetDexPc() const { return dex_pc_; }
+
+ protected:
+ GrowableArray<HInstruction*> inputs_;
+ const int32_t dex_pc_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HInvoke);
+};
+
+class HInvokeStatic : public HInvoke {
+ public:
+ HInvokeStatic(ArenaAllocator* arena, uint32_t number_of_arguments, int32_t dex_pc, int32_t index_in_dex_cache)
+ : HInvoke(arena, number_of_arguments, dex_pc), index_in_dex_cache_(index_in_dex_cache) { }
+
+ uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
+
+ DECLARE_INSTRUCTION(InvokeStatic)
+
+ private:
+ uint32_t index_in_dex_cache_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
+};
+
class HGraphVisitor : public ValueObject {
public:
explicit HGraphVisitor(HGraph* graph) : graph_(graph) { }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 334b185..d19c40c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -62,17 +62,31 @@ CompiledMethod* OptimizingCompiler::TryCompile(CompilerDriver& driver,
nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags, driver.GetVerifiedMethod(&dex_file, method_idx));
+ // For testing purposes, we put a special marker on method names that should be compiled
+ // with this compiler. This makes sure we're not regressing.
+ bool shouldCompile = dex_compilation_unit.GetSymbol().find("00024opt_00024") != std::string::npos;
+
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraphBuilder builder(&arena);
+ HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file);
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
+ if (shouldCompile) {
+ LOG(FATAL) << "Could not build graph in optimizing compiler";
+ }
return nullptr;
}
InstructionSet instruction_set = driver.GetInstructionSet();
+ // The optimizing compiler currently does not have a Thumb2 assembler.
+ if (instruction_set == kThumb2) {
+ instruction_set = kArm;
+ }
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
if (codegen == nullptr) {
+ if (shouldCompile) {
+ LOG(FATAL) << "Could not find code generator for optimizing compiler";
+ }
return nullptr;
}
@@ -90,7 +104,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(CompilerDriver& driver,
instruction_set,
allocator.GetMemory(),
codegen->GetFrameSize(),
- 0, /* GPR spill mask, unused */
+ codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
mapping_table,
vmap_table,