-rw-r--r--  compiler/driver/compiler_driver.cc  9
-rw-r--r--  compiler/elf_builder.h  227
-rw-r--r--  compiler/elf_patcher.cc  5
-rw-r--r--  compiler/image_writer.cc  8
-rw-r--r--  compiler/optimizing/code_generator.cc  52
-rw-r--r--  compiler/optimizing/code_generator.h  11
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  14
-rw-r--r--  compiler/optimizing/code_generator_arm.h  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  33
-rw-r--r--  compiler/optimizing/code_generator_x86.h  2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  34
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  2
-rw-r--r--  compiler/optimizing/graph_checker.cc  113
-rw-r--r--  compiler/optimizing/graph_checker.h  5
-rw-r--r--  compiler/optimizing/graph_visualizer.cc  8
-rw-r--r--  compiler/optimizing/locations.cc  2
-rw-r--r--  compiler/optimizing/locations.h  44
-rw-r--r--  compiler/optimizing/nodes.cc  12
-rw-r--r--  compiler/optimizing/nodes.h  15
-rw-r--r--  compiler/optimizing/register_allocator.cc  52
-rw-r--r--  compiler/optimizing/register_allocator.h  3
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  13
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.cc  6
-rw-r--r--  dex2oat/dex2oat.cc  1
-rw-r--r--  runtime/Android.mk  5
-rw-r--r--  runtime/class_linker.cc  15
-rw-r--r--  runtime/debugger.cc  23
-rw-r--r--  runtime/gc/heap.cc  33
-rw-r--r--  runtime/interpreter/interpreter.cc  28
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  19
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  6
-rw-r--r--  runtime/native_bridge_art_interface.cc  39
-rw-r--r--  runtime/native_bridge_art_interface.h  14
-rw-r--r--  runtime/runtime.cc  25
-rw-r--r--  runtime/runtime.h  14
-rw-r--r--  runtime/thread.h  17
-rw-r--r--  runtime/verifier/method_verifier.cc  22
-rw-r--r--  runtime/verifier/method_verifier.h  15
-rw-r--r--  runtime/verifier/reg_type.h  2
-rw-r--r--  runtime/verifier/reg_type_cache.cc  1
-rw-r--r--  runtime/verifier/reg_type_test.cc  8
-rw-r--r--  test/115-native-bridge/expected.txt  1
-rw-r--r--  test/115-native-bridge/nativebridge.cc  43
-rw-r--r--  test/409-materialized-condition/expected.txt  5
-rw-r--r--  test/409-materialized-condition/info.txt  1
-rw-r--r--  test/409-materialized-condition/src/Main.java  66
46 files changed, 828 insertions, 247 deletions
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index d743f90..990c1c8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -795,14 +795,11 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
if (IsImage()) {
TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
// Update image_classes_ with classes for objects created by <clinit> methods.
- Thread* self = Thread::Current();
- const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
gc::Heap* heap = Runtime::Current()->GetHeap();
// TODO: Image spaces only?
ScopedObjectAccess soa(Thread::Current());
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ WriterMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_);
heap->VisitObjects(FindClinitImageClassesCallback, this);
- self->EndAssertNoThreadSuspension(old_cause);
}
}
@@ -1872,7 +1869,8 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
// TODO we detach transaction from runtime to indicate we quit the transactional
// mode which prevents the GC from visiting objects modified during the transaction.
// Ensure GC is not run so don't access freed objects when aborting transaction.
- const char* old_casue = soa.Self()->StartAssertNoThreadSuspension("Transaction end");
+
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
runtime->ExitTransactionMode();
if (!success) {
@@ -1885,7 +1883,6 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
transaction.Abort();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
}
- soa.Self()->EndAssertNoThreadSuspension(old_casue);
}
}
soa.Self()->AssertNoPendingException();
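
A recurring pattern in this commit replaces paired StartAssertNoThreadSuspension/EndAssertNoThreadSuspension calls with a ScopedAssertNoThreadSuspension RAII guard, so the End call can no longer be skipped on an early return. A minimal sketch of what such a guard looks like, assuming Thread exposes the paired calls used above (the real class is declared in runtime/thread.h, which this commit also touches, and may differ in detail):

    class ScopedAssertNoThreadSuspension {
     public:
      ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
          : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {}
      ~ScopedAssertNoThreadSuspension() {
        // Runs on every exit path, restoring the previous suspension cause.
        self_->EndAssertNoThreadSuspension(old_cause_);
      }
      Thread* Self() { return self_; }  // Lets callers write WriterMutexLock mu(ants.Self(), ...).
     private:
      Thread* const self_;
      const char* const old_cause_;
    };
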
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 35320f5..e535b6d 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_ELF_BUILDER_H_
#define ART_COMPILER_ELF_BUILDER_H_
+#include "base/stl_util.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
@@ -354,12 +355,124 @@ class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>
ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> strtab_;
};
+template <typename Elf_Word>
+class ElfFilePiece {
+ public:
+ virtual ~ElfFilePiece() {}
+
+ virtual bool Write(File* elf_file) {
+ if (static_cast<off_t>(offset_) != lseek(elf_file->Fd(), offset_, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek to " << GetDescription() << " offset " << offset_ << " for "
+ << elf_file->GetPath();
+ return false;
+ }
+
+ return DoActualWrite(elf_file);
+ }
+
+ static bool Compare(ElfFilePiece* a, ElfFilePiece* b) {
+ return a->offset_ < b->offset_;
+ }
+
+ protected:
+ explicit ElfFilePiece(Elf_Word offset) : offset_(offset) {}
+
+ virtual std::string GetDescription() = 0;
+ virtual bool DoActualWrite(File* elf_file) = 0;
+
+ Elf_Word offset_;
+};
+
+template <typename Elf_Word>
+class ElfFileMemoryPiece : public ElfFilePiece<Elf_Word> {
+ public:
+ ElfFileMemoryPiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
+ : ElfFilePiece<Elf_Word>(offset), dbg_name_(name), data_(data), size_(size) {}
+
+ bool DoActualWrite(File* elf_file) OVERRIDE {
+ DCHECK(data_ != nullptr || size_ == 0U) << dbg_name_ << " " << size_;
+
+ if (!elf_file->WriteFully(data_, size_)) {
+ PLOG(ERROR) << "Failed to write " << dbg_name_ << " for " << elf_file->GetPath();
+ return false;
+ }
+
+ return true;
+ }
+
+ std::string GetDescription() OVERRIDE {
+ return dbg_name_;
+ }
+
+ private:
+ const std::string& dbg_name_;
+ const void *data_;
+ Elf_Word size_;
+};
+
class CodeOutput {
public:
virtual bool Write(OutputStream* out) = 0;
virtual ~CodeOutput() {}
};
+template <typename Elf_Word>
+class ElfFileRodataPiece : public ElfFilePiece<Elf_Word> {
+ public:
+ ElfFileRodataPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
+ output_(output) {}
+
+ bool DoActualWrite(File* elf_file) OVERRIDE {
+ std::unique_ptr<BufferedOutputStream> output_stream(
+ new BufferedOutputStream(new FileOutputStream(elf_file)));
+ if (!output_->Write(output_stream.get())) {
+ PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file->GetPath();
+ return false;
+ }
+
+ return true;
+ }
+
+ std::string GetDescription() OVERRIDE {
+ return ".rodata";
+ }
+
+ private:
+ CodeOutput* output_;
+};
+
+template <typename Elf_Word>
+class ElfFileOatTextPiece : public ElfFilePiece<Elf_Word> {
+ public:
+ ElfFileOatTextPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
+ output_(output) {}
+
+ bool DoActualWrite(File* elf_file) OVERRIDE {
+ // All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
+ // piece. This is for future flexibility.
+ UNUSED(output_);
+ return true;
+ }
+
+ std::string GetDescription() OVERRIDE {
+ return ".text";
+ }
+
+ private:
+ CodeOutput* output_;
+};
+
+template <typename Elf_Word>
+static bool WriteOutFile(const std::vector<ElfFilePiece<Elf_Word>*>& pieces, File* elf_file) {
+ // TODO It would be nice if this checked for overlap.
+ for (auto it = pieces.begin(); it != pieces.end(); ++it) {
+ if (!(*it)->Write(elf_file)) {
+ return false;
+ }
+ }
+ return true;
+}
+
template <typename Elf_Word, typename Elf_Shdr>
static inline constexpr Elf_Word NextOffset(const Elf_Shdr& cur, const Elf_Shdr& prev) {
return RoundUp(prev.sh_size + prev.sh_offset, cur.sh_addralign);
@@ -667,7 +780,7 @@ class ElfBuilder FINAL {
}
bool Write() {
- std::vector<ElfFilePiece> pieces;
+ std::vector<ElfFilePiece<Elf_Word>*> pieces;
Elf_Shdr prev = dynamic_builder_.section_;
std::string strtab;
@@ -746,8 +859,9 @@ class ElfBuilder FINAL {
it->section_.sh_addr = 0;
it->section_.sh_size = it->GetBuffer()->size();
it->section_.sh_link = it->GetLink();
- pieces.push_back(ElfFilePiece(it->name_, it->section_.sh_offset,
- it->GetBuffer()->data(), it->GetBuffer()->size()));
+
+ // We postpone adding an ElfFilePiece to keep the order in "pieces."
+
prev = it->section_;
if (debug_logging_) {
LOG(INFO) << it->name_ << " off=" << it->section_.sh_offset
@@ -824,55 +938,62 @@ class ElfBuilder FINAL {
elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
// Add the rest of the pieces to the list.
- pieces.push_back(ElfFilePiece("Elf Header", 0, &elf_header_, sizeof(elf_header_)));
- pieces.push_back(ElfFilePiece("Program headers", PHDR_OFFSET,
- &program_headers_, sizeof(program_headers_)));
- pieces.push_back(ElfFilePiece(".dynamic", dynamic_builder_.section_.sh_offset,
- dynamic.data(), dynamic_builder_.section_.sh_size));
- pieces.push_back(ElfFilePiece(".dynsym", dynsym_builder_.section_.sh_offset,
- dynsym.data(), dynsym.size() * sizeof(Elf_Sym)));
- pieces.push_back(ElfFilePiece(".dynstr", dynsym_builder_.GetStrTab()->section_.sh_offset,
- dynstr_.c_str(), dynstr_.size()));
- pieces.push_back(ElfFilePiece(".hash", hash_builder_.section_.sh_offset,
- hash_.data(), hash_.size() * sizeof(Elf_Word)));
- pieces.push_back(ElfFilePiece(".rodata", rodata_builder_.section_.sh_offset,
- nullptr, rodata_builder_.section_.sh_size));
- pieces.push_back(ElfFilePiece(".text", text_builder_.section_.sh_offset,
- nullptr, text_builder_.section_.sh_size));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
+ sizeof(elf_header_)));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
+ &program_headers_, sizeof(program_headers_)));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
+ dynamic_builder_.section_.sh_offset,
+ dynamic.data(),
+ dynamic_builder_.section_.sh_size));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.section_.sh_offset,
+ dynsym.data(),
+ dynsym.size() * sizeof(Elf_Sym)));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynstr",
+ dynsym_builder_.GetStrTab()->section_.sh_offset,
+ dynstr_.c_str(), dynstr_.size()));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.section_.sh_offset,
+ hash_.data(),
+ hash_.size() * sizeof(Elf_Word)));
+ pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.section_.sh_offset,
+ oat_writer_));
+ pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.section_.sh_offset,
+ oat_writer_));
if (IncludingDebugSymbols()) {
- pieces.push_back(ElfFilePiece(".symtab", symtab_builder_.section_.sh_offset,
- symtab.data(), symtab.size() * sizeof(Elf_Sym)));
- pieces.push_back(ElfFilePiece(".strtab", symtab_builder_.GetStrTab()->section_.sh_offset,
- strtab.c_str(), strtab.size()));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".symtab",
+ symtab_builder_.section_.sh_offset,
+ symtab.data(),
+ symtab.size() * sizeof(Elf_Sym)));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".strtab",
+ symtab_builder_.GetStrTab()->section_.sh_offset,
+ strtab.c_str(), strtab.size()));
}
- pieces.push_back(ElfFilePiece(".shstrtab", shstrtab_builder_.section_.sh_offset,
- &shstrtab_[0], shstrtab_.size()));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".shstrtab",
+ shstrtab_builder_.section_.sh_offset,
+ &shstrtab_[0], shstrtab_.size()));
for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
// Just add all the sections in induvidually since they are all over the
// place on the heap/stack.
Elf_Word cur_off = sections_offset + i * sizeof(Elf_Shdr);
- pieces.push_back(ElfFilePiece("section table piece", cur_off,
- section_ptrs_[i], sizeof(Elf_Shdr)));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("section table piece", cur_off,
+ section_ptrs_[i], sizeof(Elf_Shdr)));
+ }
+
+ // Postponed debug info.
+ for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->name_, it->section_.sh_offset,
+ it->GetBuffer()->data(),
+ it->GetBuffer()->size()));
}
if (!WriteOutFile(pieces)) {
LOG(ERROR) << "Unable to write to file " << elf_file_->GetPath();
- return false;
- }
- // write out the actual oat file data.
- Elf_Word oat_data_offset = rodata_builder_.section_.sh_offset;
- if (static_cast<off_t>(oat_data_offset) != lseek(elf_file_->Fd(), oat_data_offset, SEEK_SET)) {
- PLOG(ERROR) << "Failed to seek to .rodata offset " << oat_data_offset
- << " for " << elf_file_->GetPath();
- return false;
- }
- std::unique_ptr<BufferedOutputStream> output_stream(
- new BufferedOutputStream(new FileOutputStream(elf_file_)));
- if (!oat_writer_->Write(output_stream.get())) {
- PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file_->GetPath();
+
+ STLDeleteElements(&pieces); // Have to manually clean pieces.
return false;
}
+ STLDeleteElements(&pieces); // Have to manually clean pieces.
return true;
}
@@ -1028,34 +1149,12 @@ class ElfBuilder FINAL {
}
}
- struct ElfFilePiece {
- ElfFilePiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
- : dbg_name_(name), offset_(offset), data_(data), size_(size) {}
- ~ElfFilePiece() {}
-
- const std::string& dbg_name_;
- Elf_Word offset_;
- const void *data_;
- Elf_Word size_;
- static bool Compare(ElfFilePiece a, ElfFilePiece b) {
- return a.offset_ < b.offset_;
- }
- };
// Write each of the pieces out to the file.
- bool WriteOutFile(const std::vector<ElfFilePiece>& pieces) {
- // TODO It would be nice if this checked for overlap.
+ bool WriteOutFile(const std::vector<ElfFilePiece<Elf_Word>*>& pieces) {
for (auto it = pieces.begin(); it != pieces.end(); ++it) {
- if (it->data_) {
- if (static_cast<off_t>(it->offset_) != lseek(elf_file_->Fd(), it->offset_, SEEK_SET)) {
- PLOG(ERROR) << "Failed to seek to " << it->dbg_name_ << " offset location "
- << it->offset_ << " for " << elf_file_->GetPath();
- return false;
- }
- if (!elf_file_->WriteFully(it->data_, it->size_)) {
- PLOG(ERROR) << "Failed to write " << it->dbg_name_ << " for " << elf_file_->GetPath();
- return false;
- }
+ if (!(*it)->Write(elf_file_)) {
+ return false;
}
}
return true;
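
The refactoring above replaces the ad-hoc seek-and-write code with a list of polymorphic ElfFilePiece objects, each of which knows its own file offset and how to write itself. A hedged usage sketch (the header object, offset, writer, and file below are made-up placeholders):

    std::vector<ElfFilePiece<uint32_t>*> pieces;
    pieces.push_back(new ElfFileMemoryPiece<uint32_t>("Elf Header", 0,
                                                      &elf_header, sizeof(elf_header)));
    pieces.push_back(new ElfFileRodataPiece<uint32_t>(rodata_offset, oat_writer));
    // Each piece seeks to its own offset_ before writing, so pieces can be
    // emitted in any order; ElfFilePiece::Compare allows sorting by offset.
    bool ok = WriteOutFile(pieces, elf_file);
    STLDeleteElements(&pieces);  // The vector holds raw pointers; free them manually.
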
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index 92eb4d8..0646b75 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -188,9 +188,8 @@ bool ElfPatcher::PatchElf() {
compiler_driver_->GetMethodsToPatch().size() +
compiler_driver_->GetClassesToPatch().size());
}
- Thread* self = Thread::Current();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const char* old_cause = self->StartAssertNoThreadSuspension("ElfPatcher");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "ElfPatcher");
typedef std::vector<const CompilerDriver::CallPatchInformation*> CallPatches;
const CallPatches& code_to_patch = compiler_driver_->GetCodeToPatch();
@@ -259,8 +258,6 @@ bool ElfPatcher::PatchElf() {
SetPatchLocation(patch, PointerToLowMemUInt32(get_image_address_(cb_data_, target)));
}
- self->EndAssertNoThreadSuspension(old_cause);
-
if (write_patches_) {
return WriteOutPatchData();
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 9c9cdf2..c08d3bd 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -543,11 +543,9 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
// TODO: Image spaces only?
- const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
DCHECK_LT(image_end_, image_->Size());
// Clear any pre-existing monitors which may have been in the monitor words.
heap->VisitObjects(WalkFieldsCallback, this);
- self->EndAssertNoThreadSuspension(old);
}
const byte* oat_file_begin = image_begin_ + RoundUp(image_end_, kPageSize);
@@ -577,20 +575,18 @@ void ImageWriter::CalculateNewObjectOffsets(size_t oat_loaded_size, size_t oat_d
void ImageWriter::CopyAndFixupObjects()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "ImageWriter");
gc::Heap* heap = Runtime::Current()->GetHeap();
// TODO: heap validation can't handle this fix up pass
heap->DisableObjectValidation();
// TODO: Image spaces only?
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ WriterMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the object previously had hash codes.
for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
}
saved_hashes_.clear();
- self->EndAssertNoThreadSuspension(old_cause);
}
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2547a29..3231c99 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -44,6 +44,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
+ GetGraph()->GetNumberOfTemporaries()
+ 1 /* filler */,
+ 0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
+ 1 /* current method */);
GenerateFrameEntry();
@@ -111,10 +112,15 @@ size_t CodeGenerator::AllocateFreeRegisterInternal(
return -1;
}
-void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots, size_t number_of_out_slots) {
+void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
+ size_t maximum_number_of_live_registers,
+ size_t number_of_out_slots) {
+ first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;
+
SetFrameSize(RoundUp(
number_of_spill_slots * kVRegSize
+ number_of_out_slots * kVRegSize
+ + maximum_number_of_live_registers * GetWordSize()
+ FrameEntrySpillSize(),
kStackAlignment));
}
@@ -468,4 +474,48 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
}
}
+size_t CodeGenerator::GetStackOffsetOfSavedRegister(size_t index) {
+ return first_register_slot_in_slow_path_ + index * GetWordSize();
+}
+
+void CodeGenerator::SaveLiveRegisters(LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ uint32_t count = 0;
+ for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (register_set->ContainsCoreRegister(i)) {
+ size_t stack_offset = GetStackOffsetOfSavedRegister(count);
+ ++count;
+ SaveCoreRegister(Location::StackSlot(stack_offset), i);
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
+ }
+ }
+ }
+
+ for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+ if (register_set->ContainsFloatingPointRegister(i)) {
+ LOG(FATAL) << "Unimplemented";
+ }
+ }
+}
+
+void CodeGenerator::RestoreLiveRegisters(LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ uint32_t count = 0;
+ for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (register_set->ContainsCoreRegister(i)) {
+ size_t stack_offset = GetStackOffsetOfSavedRegister(count);
+ ++count;
+ RestoreCoreRegister(Location::StackSlot(stack_offset), i);
+ }
+ }
+
+ for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+ if (register_set->ContainsFloatingPointRegister(i)) {
+ LOG(FATAL) << "Unimplemented";
+ }
+ }
+}
+
} // namespace art
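
ComputeFrameSize now reserves a save area above the out and spill slots for registers live at slow-path calls, and first_register_slot_in_slow_path_ records where that area begins. A worked example with made-up numbers:

    // Assume 5 spill slots, 2 out slots, at most 3 registers live at a slow
    // path, kVRegSize = 4, GetWordSize() = 4, FrameEntrySpillSize() = 4, and
    // kStackAlignment = 16.
    size_t first_register_slot_in_slow_path = (2 + 5) * 4;       // 28
    size_t frame_size = RoundUp(5 * 4 + 2 * 4 + 3 * 4 + 4, 16);  // RoundUp(44, 16) = 48
    // SaveLiveRegisters then stores the n-th saved live register at offset
    // 28 + n * 4, i.e. 28, 32, 36, via GetStackOffsetOfSavedRegister(n).
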
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a83d703..55f5d8d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -98,7 +98,9 @@ class CodeGenerator : public ArenaObject {
virtual HGraphVisitor* GetInstructionVisitor() = 0;
virtual Assembler* GetAssembler() = 0;
virtual size_t GetWordSize() const = 0;
- void ComputeFrameSize(size_t number_of_spill_slots, size_t number_of_out_slots);
+ void ComputeFrameSize(size_t number_of_spill_slots,
+ size_t maximum_number_of_live_registers,
+ size_t number_of_out_slots);
virtual size_t FrameEntrySpillSize() const = 0;
int32_t GetStackSlot(HLocal* local) const;
Location GetTemporaryLocation(HTemporary* temp) const;
@@ -114,6 +116,8 @@ class CodeGenerator : public ArenaObject {
virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
virtual InstructionSet GetInstructionSet() const = 0;
+ virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) = 0;
+ virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) = 0;
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
@@ -128,6 +132,8 @@ class CodeGenerator : public ArenaObject {
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
void BuildStackMaps(std::vector<uint8_t>* vector);
+ void SaveLiveRegisters(LocationSummary* locations);
+ void RestoreLiveRegisters(LocationSummary* locations);
bool IsLeafMethod() const {
return is_leaf_;
@@ -141,6 +147,7 @@ class CodeGenerator : public ArenaObject {
CodeGenerator(HGraph* graph, size_t number_of_registers)
: frame_size_(kUninitializedFrameSize),
core_spill_mask_(0),
+ first_register_slot_in_slow_path_(0),
graph_(graph),
block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
@@ -166,9 +173,11 @@ class CodeGenerator : public ArenaObject {
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
+ uint32_t first_register_slot_in_slow_path_;
private:
void InitLocations(HInstruction* instruction);
+ size_t GetStackOffsetOfSavedRegister(size_t index);
HGraph* const graph_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ad62279..206ed13 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -98,10 +98,12 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(instruction_->GetLocations());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
__ ldr(LR, Address(TR, offset));
__ blx(LR);
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ codegen->RestoreLiveRegisters(instruction_->GetLocations());
__ b(GetReturnLabel());
}
@@ -182,6 +184,14 @@ void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg)
stream << ArmManagedRegister::FromDRegister(DRegister(reg));
}
+void CodeGeneratorARM::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ str(static_cast<Register>(reg_id), Address(SP, stack_location.GetStackIndex()));
+}
+
+void CodeGeneratorARM::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ ldr(static_cast<Register>(reg_id), Address(SP, stack_location.GetStackIndex()));
+}
+
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
: CodeGenerator(graph, kNumberOfRegIds),
location_builder_(graph, this),
@@ -577,7 +587,7 @@ void LocationsBuilderARM::VisitIf(HIf* if_instr) {
DCHECK(cond->IsCondition());
HCondition* condition = cond->AsCondition();
if (condition->NeedsMaterialization()) {
- locations->SetInAt(0, Location::Any());
+ locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -590,7 +600,7 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
__ cmp(if_instr->GetLocations()->InAt(0).AsArm().AsCoreRegister(),
ShifterOperand(0));
- __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), EQ);
+ __ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
} else {
// Condition has not been materialized, use its inputs as the comparison and its
// condition as the branch condition.
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 2480960..0902fb8 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -132,6 +132,8 @@ class CodeGeneratorARM : public CodeGenerator {
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual size_t GetWordSize() const OVERRIDE {
return kArmWordSize;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3383cb2..0db4311 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -122,8 +122,10 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(instruction_->GetLocations());
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ codegen->RestoreLiveRegisters(instruction_->GetLocations());
__ jmp(GetReturnLabel());
}
@@ -161,6 +163,14 @@ void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg)
stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg));
}
+void CodeGeneratorX86::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ movl(Address(ESP, stack_location.GetStackIndex()), static_cast<Register>(reg_id));
+}
+
+void CodeGeneratorX86::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ movl(static_cast<Register>(reg_id), Address(ESP, stack_location.GetStackIndex()));
+}
+
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
: CodeGenerator(graph, kNumberOfRegIds),
location_builder_(graph, this),
@@ -541,14 +551,18 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
DCHECK(cond->IsCondition());
HCondition* condition = cond->AsCondition();
if (condition->NeedsMaterialization()) {
- // Materialized condition, compare against 0
- Location lhs = if_instr->GetLocations()->InAt(0);
- if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86().AsCpuRegister(), Immediate(0));
- } else {
- __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
+ // Moves do not affect the eflags register, so if the condition is evaluated
+ // just before the if, we don't need to evaluate it again.
+ if (!condition->IsBeforeWhenDisregardMoves(if_instr)) {
+ // Materialized condition, compare against 0
+ Location lhs = if_instr->GetLocations()->InAt(0);
+ if (lhs.IsRegister()) {
+ __ cmpl(lhs.AsX86().AsCpuRegister(), Immediate(0));
+ } else {
+ __ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
+ }
}
- __ j(kEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
} else {
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
@@ -625,6 +639,9 @@ void LocationsBuilderX86::VisitCondition(HCondition* comp) {
void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
+ Register reg = locations->Out().AsX86().AsCpuRegister();
+ // Clear register: setcc only sets the low byte.
+ __ xorl(reg, reg);
if (locations->InAt(1).IsRegister()) {
__ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
locations->InAt(1).AsX86().AsCpuRegister());
@@ -636,7 +653,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
__ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
Address(ESP, locations->InAt(1).GetStackIndex()));
}
- __ setb(X86Condition(comp->GetCondition()), locations->Out().AsX86().AsCpuRegister());
+ __ setb(X86Condition(comp->GetCondition()), reg);
}
}
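
The xorl added to VisitCondition works around an x86 subtlety: setcc writes only the low byte of its destination register, so the upper bits must be cleared first, and the clear has to happen before the cmpl because xorl itself overwrites eflags. The emitted sequence now looks roughly like this (registers are illustrative):

    // xorl %eax, %eax   // clear all 32 bits; placed before the compare
    //                   // because xorl clobbers eflags
    // cmpl %ecx, %ebx   // evaluate the condition, setting eflags
    // setl %al          // write 0 or 1 into the low byte of the cleared register
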
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f1be0ad..ffcaf60 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -134,6 +134,8 @@ class CodeGeneratorX86 : public CodeGenerator {
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual size_t GetWordSize() const OVERRIDE {
return kX86WordSize;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ca03af8..56198af 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -103,8 +103,10 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(instruction_->GetLocations());
__ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ codegen->RestoreLiveRegisters(instruction_->GetLocations());
__ jmp(GetReturnLabel());
}
@@ -170,6 +172,14 @@ void CodeGeneratorX86_64::DumpFloatingPointRegister(std::ostream& stream, int re
stream << X86_64ManagedRegister::FromXmmRegister(FloatRegister(reg));
}
+void CodeGeneratorX86_64::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ movq(Address(CpuRegister(RSP), stack_location.GetStackIndex()), CpuRegister(reg_id));
+}
+
+void CodeGeneratorX86_64::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
+ __ movq(CpuRegister(reg_id), Address(CpuRegister(RSP), stack_location.GetStackIndex()));
+}
+
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
: CodeGenerator(graph, kNumberOfRegIds),
location_builder_(graph, this),
@@ -424,14 +434,18 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
DCHECK(cond->IsCondition());
HCondition* condition = cond->AsCondition();
if (condition->NeedsMaterialization()) {
- // Materialized condition, compare against 0.
- Location lhs = if_instr->GetLocations()->InAt(0);
- if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), Immediate(0));
- } else {
- __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
+ // Moves do not affect the eflags register, so if the condition is evaluated
+ // just before the if, we don't need to evaluate it again.
+ if (!condition->IsBeforeWhenDisregardMoves(if_instr)) {
+ // Materialized condition, compare against 0.
+ Location lhs = if_instr->GetLocations()->InAt(0);
+ if (lhs.IsRegister()) {
+ __ cmpl(lhs.AsX86_64().AsCpuRegister(), Immediate(0));
+ } else {
+ __ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
+ }
}
- __ j(kEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
} else {
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
@@ -505,6 +519,9 @@ void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
+ CpuRegister reg = locations->Out().AsX86_64().AsCpuRegister();
+ // Clear register: setcc only sets the low byte.
+ __ xorq(reg, reg);
if (locations->InAt(1).IsRegister()) {
__ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
locations->InAt(1).AsX86_64().AsCpuRegister());
@@ -515,8 +532,7 @@ void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
__ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
- __ setcc(X86_64Condition(comp->GetCondition()),
- comp->GetLocations()->Out().AsX86_64().AsCpuRegister());
+ __ setcc(X86_64Condition(comp->GetCondition()), reg);
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 78b60fe..ea21872 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -131,6 +131,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
virtual void GenerateFrameExit() OVERRIDE;
virtual void Bind(Label* label) OVERRIDE;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ virtual void SaveCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
+ virtual void RestoreCoreRegister(Location stack_location, uint32_t reg_id) OVERRIDE;
virtual size_t GetWordSize() const OVERRIDE {
return kX86_64WordSize;
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index ad9ed0c..e36b1cd 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -20,6 +20,8 @@
#include <map>
#include <sstream>
+#include "base/bit_vector-inl.h"
+
namespace art {
void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
@@ -158,6 +160,73 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
}
}
}
+
+ if (block->IsLoopHeader()) {
+ CheckLoop(block);
+ }
+}
+
+void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
+ int id = loop_header->GetBlockId();
+
+ // Ensure the pre-header block is first in the list of
+ // predecessors of a loop header.
+ if (!loop_header->IsLoopPreHeaderFirstPredecessor()) {
+ std::stringstream error;
+ error << "Loop pre-header is not the first predecessor of the loop header "
+ << id << ".";
+ errors_.Insert(error.str());
+ }
+
+ // Ensure the loop header has only two predecessors and that only the
+ // second one is a back edge.
+ if (loop_header->GetPredecessors().Size() < 2) {
+ std::stringstream error;
+ error << "Loop header " << id << " has less than two predecessors.";
+ errors_.Insert(error.str());
+ } else if (loop_header->GetPredecessors().Size() > 2) {
+ std::stringstream error;
+ error << "Loop header " << id << " has more than two predecessors.";
+ errors_.Insert(error.str());
+ } else {
+ HLoopInformation* loop_information = loop_header->GetLoopInformation();
+ HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
+ if (loop_information->IsBackEdge(first_predecessor)) {
+ std::stringstream error;
+ error << "First predecessor of loop header " << id << " is a back edge.";
+ errors_.Insert(error.str());
+ }
+ HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
+ if (!loop_information->IsBackEdge(second_predecessor)) {
+ std::stringstream error;
+ error << "Second predecessor of loop header " << id
+ << " is not a back edge.";
+ errors_.Insert(error.str());
+ }
+ }
+
+ // Ensure there is only one back edge per loop.
+ size_t num_back_edges =
+ loop_header->GetLoopInformation()->GetBackEdges().Size();
+ if (num_back_edges != 1) {
+ std::stringstream error;
+ error << "Loop defined by header " << id << " has "
+ << num_back_edges << " back edge(s).";
+ errors_.Insert(error.str());
+ }
+
+ // Ensure all blocks in the loop are dominated by the loop header.
+ const ArenaBitVector& loop_blocks =
+ loop_header->GetLoopInformation()->GetBlocks();
+ for (uint32_t i : loop_blocks.Indexes()) {
+ HBasicBlock* loop_block = GetGraph()->GetBlocks().Get(i);
+ if (!loop_header->Dominates(loop_block)) {
+ std::stringstream error;
+ error << "Loop block " << loop_block->GetBlockId()
+ << " not dominated by loop header " << id;
+ errors_.Insert(error.str());
+ }
+ }
}
void SSAChecker::VisitInstruction(HInstruction* instruction) {
@@ -180,4 +249,48 @@ void SSAChecker::VisitInstruction(HInstruction* instruction) {
}
}
+void SSAChecker::VisitPhi(HPhi* phi) {
+ VisitInstruction(phi);
+
+ // Ensure the first input of a phi is not itself.
+ if (phi->InputAt(0) == phi) {
+ std::stringstream error;
+ error << "Loop phi " << phi->GetId()
+ << " in block " << phi->GetBlock()->GetBlockId()
+ << " is its own first input.";
+ errors_.Insert(error.str());
+ }
+
+ // Ensure the number of phi inputs is the same as the number of
+ // its predecessors.
+ const GrowableArray<HBasicBlock*>& predecessors =
+ phi->GetBlock()->GetPredecessors();
+ if (phi->InputCount() != predecessors.Size()) {
+ std::stringstream error;
+ error << "Phi " << phi->GetId()
+ << " in block " << phi->GetBlock()->GetBlockId()
+ << " has " << phi->InputCount() << " inputs, but block "
+ << phi->GetBlock()->GetBlockId() << " has "
+ << predecessors.Size() << " predecessors.";
+ errors_.Insert(error.str());
+ } else {
+ // Ensure phi input at index I either comes from the Ith
+ // predecessor or from a block that dominates this predecessor.
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ HBasicBlock* predecessor = predecessors.Get(i);
+ if (!(input->GetBlock() == predecessor
+ || input->GetBlock()->Dominates(predecessor))) {
+ std::stringstream error;
+ error << "Input " << input->GetId() << " at index " << i
+ << " of phi " << phi->GetId()
+ << " from block " << phi->GetBlock()->GetBlockId()
+ << " is not defined in predecessor number " << i
+ << " nor in a block dominating it.";
+ errors_.Insert(error.str());
+ }
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 8ddd399..34a770b 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -67,9 +67,12 @@ class SSAChecker : public GraphChecker {
// Perform SSA form checks on `block`.
virtual void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+ // Loop-related checks from block `loop_header`.
+ void CheckLoop(HBasicBlock* loop_header);
- // Perform SSA form checks on `instruction`.
+ // Perform SSA form checks on instructions.
virtual void VisitInstruction(HInstruction* instruction) OVERRIDE;
+ virtual void VisitPhi(HPhi* phi) OVERRIDE;
private:
DISALLOW_COPY_AND_ASSIGN(SSAChecker);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 7f64be4..0fb4737 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -82,6 +82,8 @@ class HGraphVisualizerPrinter : public HGraphVisitor {
}
char GetTypeId(Primitive::Type type) {
+ // Note that Primitive::Descriptor would not work for us
+ // because it does not handle reference types (that is kPrimNot).
switch (type) {
case Primitive::kPrimBoolean: return 'z';
case Primitive::kPrimByte: return 'b';
@@ -127,6 +129,12 @@ class HGraphVisualizerPrinter : public HGraphVisitor {
}
} else if (location.IsConstant()) {
output_ << "constant";
+ HConstant* constant = location.GetConstant();
+ if (constant->IsIntConstant()) {
+ output_ << " " << constant->AsIntConstant()->GetValue();
+ } else if (constant->IsLongConstant()) {
+ output_ << " " << constant->AsLongConstant()->GetValue();
+ }
} else if (location.IsInvalid()) {
output_ << "invalid";
} else if (location.IsStackSlot()) {
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index fce97bd..1c36cdf 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -28,7 +28,7 @@ LocationSummary::LocationSummary(HInstruction* instruction, CallKind call_kind)
call_kind_(call_kind),
stack_mask_(nullptr),
register_mask_(0),
- live_registers_(0) {
+ live_registers_() {
inputs_.SetSize(instruction->InputCount());
for (size_t i = 0; i < instruction->InputCount(); ++i) {
inputs_.Put(i, Location());
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 041e85b..06623b6 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -266,6 +266,34 @@ class Location : public ValueObject {
uword value_;
};
+class RegisterSet : public ValueObject {
+ public:
+ RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+
+ void Add(Location loc) {
+ // TODO: floating point registers.
+ core_registers_ |= (1 << loc.reg().RegId());
+ }
+
+ bool ContainsCoreRegister(uint32_t id) {
+ return Contains(core_registers_, id);
+ }
+
+ bool ContainsFloatingPointRegister(uint32_t id) {
+ return Contains(floating_point_registers_, id);
+ }
+
+ static bool Contains(uint32_t register_set, uint32_t reg) {
+ return (register_set & (1 << reg)) != 0;
+ }
+
+ private:
+ uint32_t core_registers_;
+ uint32_t floating_point_registers_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterSet);
+};
+
/**
* The code generator computes LocationSummary for each instruction so that
* the instruction itself knows what code to generate: where to find the inputs
@@ -327,6 +355,8 @@ class LocationSummary : public ArenaObject {
Location Out() const { return output_; }
bool CanCall() const { return call_kind_ != kNoCall; }
+ bool WillCall() const { return call_kind_ == kCall; }
+ bool OnlyCallsOnSlowPath() const { return call_kind_ == kCallOnSlowPath; }
bool NeedsSafepoint() const { return CanCall(); }
void SetStackBit(uint32_t index) {
@@ -337,14 +367,22 @@ class LocationSummary : public ArenaObject {
register_mask_ |= (1 << reg_id);
}
- void SetLiveRegister(uint32_t reg_id) {
- live_registers_ |= (1 << reg_id);
+ bool RegisterContainsObject(uint32_t reg_id) {
+ return RegisterSet::Contains(register_mask_, reg_id);
+ }
+
+ void AddLiveRegister(Location location) {
+ live_registers_.Add(location);
}
BitVector* GetStackMask() const {
return stack_mask_;
}
+ RegisterSet* GetLiveRegisters() {
+ return &live_registers_;
+ }
+
private:
GrowableArray<Location> inputs_;
GrowableArray<Location> temps_;
@@ -359,7 +397,7 @@ class LocationSummary : public ArenaObject {
uint32_t register_mask_;
// Registers that are in use at this position.
- uint32_t live_registers_;
+ RegisterSet live_registers_;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
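
RegisterSet replaces the raw live_registers_ bitmask with a small value type holding separate core and floating-point masks. A minimal usage sketch (register id 3 is arbitrary; Add only fills the core mask for now, as the TODO notes):

    RegisterSet live;
    live.Add(Location::RegisterLocation(ManagedRegister(3)));  // sets bit 3 of core_registers_
    bool core = live.ContainsCoreRegister(3);                  // true
    bool fp   = live.ContainsFloatingPointRegister(3);         // false: the FP mask is untouched
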
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 72c5834..09412a9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -555,14 +555,22 @@ bool HCondition::NeedsMaterialization() const {
return true;
}
- // TODO: should we allow intervening instructions with no side-effect between this condition
- // and the If instruction?
+ // TODO: if there is no intervening instructions with side-effect between this condition
+ // and the If instruction, we should move the condition just before the If.
if (GetNext() != user) {
return true;
}
return false;
}
+bool HCondition::IsBeforeWhenDisregardMoves(HIf* if_) const {
+ HInstruction* previous = if_->GetPrevious();
+ while (previous != nullptr && previous->IsParallelMove()) {
+ previous = previous->GetPrevious();
+ }
+ return previous == this;
+}
+
bool HInstruction::Equals(HInstruction* other) const {
if (!InstructionTypeEquals(other)) return false;
DCHECK_EQ(GetKind(), other->GetKind());
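
IsBeforeWhenDisregardMoves is what lets the x86 and x86-64 code generators above skip re-comparing a materialized condition: parallel moves inserted by the register allocator do not touch eflags, so a condition separated from its HIf only by moves is still effectively "just before" it. An illustrative instruction stream (hypothetical):

    // HCondition c       <- the condition sets eflags here
    // HParallelMove      <- register-allocator move; leaves eflags intact
    // HParallelMove
    // HIf(c)             <- c->IsBeforeWhenDisregardMoves(if) returns true
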
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 47c8eda..be6b355 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -383,6 +383,12 @@ class HBasicBlock : public ArenaObject {
return (loop_information_ != nullptr) && (loop_information_->GetHeader() == this);
}
+ bool IsLoopPreHeaderFirstPredecessor() const {
+ DCHECK(IsLoopHeader());
+ DCHECK(!GetPredecessors().IsEmpty());
+ return GetPredecessors().Get(0) == GetLoopInformation()->GetPreHeader();
+ }
+
HLoopInformation* GetLoopInformation() const {
return loop_information_;
}
@@ -606,7 +612,7 @@ class HInstruction : public ArenaObject {
bool IsInLoop() const { return block_->IsInLoop(); }
bool IsLoopHeaderPhi() { return IsPhi() && block_->IsLoopHeader(); }
- virtual size_t InputCount() const = 0;
+ virtual size_t InputCount() const = 0;
virtual HInstruction* InputAt(size_t i) const = 0;
virtual void Accept(HGraphVisitor* visitor) = 0;
@@ -1089,8 +1095,15 @@ class HCondition : public HBinaryOperation {
: HBinaryOperation(Primitive::kPrimBoolean, first, second) {}
virtual bool IsCommutative() { return true; }
+
+ // For register allocation purposes, returns whether this instruction needs to be
+ // materialized (that is, not just be in the processor flags).
bool NeedsMaterialization() const;
+ // For code generation purposes, returns whether this instruction is just before
+ // `if_`, and disregard moves in between.
+ bool IsBeforeWhenDisregardMoves(HIf* if_) const;
+
DECLARE_INSTRUCTION(Condition);
virtual IfCondition GetCondition() const = 0;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 9ba75b8..1ac9b78 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -45,7 +45,8 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
number_of_registers_(-1),
registers_array_(nullptr),
blocked_registers_(allocator->AllocArray<bool>(codegen->GetNumberOfRegisters())),
- reserved_out_slots_(0) {
+ reserved_out_slots_(0),
+ maximum_number_of_live_registers_(0) {
codegen->SetupBlockedRegisters(blocked_registers_);
physical_register_intervals_.SetSize(codegen->GetNumberOfRegisters());
// Always reserve for the current method and the graph's max out registers.
@@ -157,9 +158,34 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
}
}
+ bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
+ && (instruction->GetType() != Primitive::kPrimFloat);
+
+ GrowableArray<LiveInterval*>& unhandled = core_register
+ ? unhandled_core_intervals_
+ : unhandled_fp_intervals_;
+
if (locations->CanCall()) {
- codegen_->MarkNotLeaf();
+ if (!instruction->IsSuspendCheck()) {
+ codegen_->MarkNotLeaf();
+ }
safepoints_.Add(instruction);
+ if (locations->OnlyCallsOnSlowPath()) {
+ // We add a synthesized range at this position to record the live registers
+ // at this position. Ideally, we could just update the safepoints when locations
+ // are updated, but we currently need to know the full stack size before updating
+ // locations (because of parameters and the fact that we don't have a frame pointer).
+ // And knowing the full stack size requires to know the maximum number of live
+ // registers at calls in slow paths.
+ // By adding the following interval in the algorithm, we can compute this
+ // maximum before updating locations.
+ LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
+ interval->AddRange(position, position + 1);
+ unhandled.Add(interval);
+ }
+ }
+
+ if (locations->WillCall()) {
// Block all registers.
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
BlockRegister(Location::RegisterLocation(ManagedRegister(i)),
@@ -176,12 +202,6 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
}
}
- bool core_register = (instruction->GetType() != Primitive::kPrimDouble)
- && (instruction->GetType() != Primitive::kPrimFloat);
- GrowableArray<LiveInterval*>& unhandled = core_register
- ? unhandled_core_intervals_
- : unhandled_fp_intervals_;
-
LiveInterval* current = instruction->GetLiveInterval();
if (current == nullptr) return;
@@ -405,6 +425,14 @@ void RegisterAllocator::LinearScan() {
}
}
+ if (current->IsSlowPathSafepoint()) {
+ // Synthesized interval to record the maximum number of live registers
+ // at safepoints. No need to allocate a register for it.
+ maximum_number_of_live_registers_ =
+ std::max(maximum_number_of_live_registers_, active_.Size());
+ continue;
+ }
+
// (4) Try to find an available register.
bool success = TryAllocateFreeReg(current);
@@ -930,14 +958,13 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
LocationSummary* locations = safepoint->GetLocations();
if (!current->Covers(position)) continue;
- if (current->GetType() == Primitive::kPrimNot) {
- DCHECK(current->GetParent()->HasSpillSlot());
+ if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
switch (source.GetKind()) {
case Location::kRegister: {
- locations->SetLiveRegister(source.reg().RegId());
+ locations->AddLiveRegister(source);
if (current->GetType() == Primitive::kPrimNot) {
locations->SetRegisterBit(source.reg().RegId());
}
@@ -1020,7 +1047,8 @@ static Location FindLocationAt(LiveInterval* interval, size_t position) {
}
void RegisterAllocator::Resolve() {
- codegen_->ComputeFrameSize(spill_slots_.Size(), reserved_out_slots_);
+ codegen_->ComputeFrameSize(
+ spill_slots_.Size(), maximum_number_of_live_registers_, reserved_out_slots_);
// Adjust the Out Location of instructions.
// TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 7d397e3..3c305c8 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -179,6 +179,9 @@ class RegisterAllocator {
// Slots reserved for out arguments.
size_t reserved_out_slots_;
+ // The maximum live registers at safepoints.
+ size_t maximum_number_of_live_registers_;
+
FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 33b1f1f..dea6181 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -138,7 +138,8 @@ class LiveInterval : public ArenaObject {
HInstruction* defined_by = nullptr,
bool is_fixed = false,
int reg = kNoRegister,
- bool is_temp = false)
+ bool is_temp = false,
+ bool is_slow_path_safepoint = false)
: allocator_(allocator),
first_range_(nullptr),
last_range_(nullptr),
@@ -150,8 +151,14 @@ class LiveInterval : public ArenaObject {
spill_slot_(kNoSpillSlot),
is_fixed_(is_fixed),
is_temp_(is_temp),
+ is_slow_path_safepoint_(is_slow_path_safepoint),
defined_by_(defined_by) {}
+ static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
+ return new (allocator) LiveInterval(
+ allocator, Primitive::kPrimVoid, instruction, false, kNoRegister, false, true);
+ }
+
static LiveInterval* MakeFixedInterval(ArenaAllocator* allocator, int reg, Primitive::Type type) {
return new (allocator) LiveInterval(allocator, type, nullptr, true, reg, false);
}
@@ -163,6 +170,7 @@ class LiveInterval : public ArenaObject {
}
bool IsFixed() const { return is_fixed_; }
+ bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
@@ -480,6 +488,9 @@ class LiveInterval : public ArenaObject {
// Whether the interval is for a temporary.
const bool is_temp_;
+ // Whether the interval is for a safepoint that calls on slow path.
+ const bool is_slow_path_safepoint_;
+
// The instruction represented by this interval.
HInstruction* const defined_by_;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index d541a62..e02a182 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -83,10 +83,6 @@ void SsaDeadPhiElimination::Run() {
}
}
-static bool LoopPreHeaderIsFirstPredecessor(HBasicBlock* block) {
- return block->GetPredecessors().Get(0) == block->GetLoopInformation()->GetPreHeader();
-}
-
void SsaRedundantPhiElimination::Run() {
// Add all phis in the worklist.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
@@ -109,7 +105,7 @@ void SsaRedundantPhiElimination::Run() {
// A loop phi cannot have itself as the first phi. Note that this
// check relies on our simplification pass ensuring the pre-header
// block is first in the list of predecessors of the loop header.
- DCHECK(!phi->IsLoopHeaderPhi() || LoopPreHeaderIsFirstPredecessor(phi->GetBlock()));
+ DCHECK(!phi->IsLoopHeaderPhi() || phi->GetBlock()->IsLoopPreHeaderFirstPredecessor());
DCHECK_NE(phi, candidate);
for (size_t i = 1; i < phi->InputCount(); ++i) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index afc01dc..9e6e958 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1252,6 +1252,7 @@ static int dex2oat(int argc, char** argv) {
} else {
oat_file.reset(new File(oat_fd, oat_location));
oat_file->DisableAutoClose();
+ oat_file->SetLength(0);
}
if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to create oat file: " << oat_location;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 61bc9ff..46b2e10 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -79,6 +79,7 @@ LIBART_COMMON_SRC_FILES := \
intern_table.cc \
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
+ interpreter/interpreter_goto_table_impl.cc \
interpreter/interpreter_switch_impl.cc \
java_vm_ext.cc \
jdwp/jdwp_event.cc \
@@ -200,10 +201,6 @@ LIBART_COMMON_SRC_FILES += \
entrypoints/quick/quick_throw_entrypoints.cc \
entrypoints/quick/quick_trampoline_entrypoints.cc
-# Source files that only compile with GCC.
-LIBART_GCC_ONLY_SRC_FILES := \
- interpreter/interpreter_goto_table_impl.cc
-
LIBART_TARGET_LDFLAGS :=
LIBART_HOST_LDFLAGS :=
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cb0fe0a..f927720 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2950,7 +2950,7 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
}
DCHECK(dst->IsArtMethod()) << PrettyDescriptor(dst->GetClass());
- const char* old_cause = self->StartAssertNoThreadSuspension("LoadMethod");
+ ScopedAssertNoThreadSuspension ants(self, "LoadMethod");
dst->SetDexMethodIndex(dex_method_idx);
dst->SetDeclaringClass(klass.Get());
dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
@@ -2997,7 +2997,6 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
}
dst->SetAccessFlags(access_flags);
- self->EndAssertNoThreadSuspension(old_cause);
return dst;
}
@@ -3474,8 +3473,7 @@ void ClassLinker::MoveImageClassesToClassTable() {
if (!dex_cache_image_class_lookup_required_) {
return; // All dex cache classes are already in the class table.
}
- const char* old_no_suspend_cause =
- self->StartAssertNoThreadSuspension("Moving image classes to class table");
+ ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
std::string temp;
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
@@ -3501,13 +3499,10 @@ void ClassLinker::MoveImageClassesToClassTable() {
}
}
dex_cache_image_class_lookup_required_ = false;
- self->EndAssertNoThreadSuspension(old_no_suspend_cause);
}
mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
- Thread* self = Thread::Current();
- const char* old_no_suspend_cause =
- self->StartAssertNoThreadSuspension("Image class lookup");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches();
for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
@@ -3521,13 +3516,11 @@ mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
- self->EndAssertNoThreadSuspension(old_no_suspend_cause);
return klass;
}
}
}
}
- self->EndAssertNoThreadSuspension(old_no_suspend_cause);
return nullptr;
}
@@ -5077,7 +5070,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
// we want a relatively stable order so that adding new fields
// minimizes disruption of C++ version such as Class and Method.
std::deque<mirror::ArtField*> grouped_and_sorted_fields;
- const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension(
+ const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension(
"Naked ArtField references in deque");
for (size_t i = 0; i < num_fields; i++) {
mirror::ArtField* f = fields->Get(i);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b1b157a..a9c4b4a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2003,7 +2003,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
}
- const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
{
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
@@ -2024,7 +2024,6 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
JDWP::ObjectId thread_group_id = gRegistry->Add(group);
expandBufAddObjectId(pReply, thread_group_id);
}
- soa.Self()->EndAssertNoThreadSuspension(old_cause);
return error;
}
@@ -2058,12 +2057,11 @@ JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::Ex
if (error != JDWP::ERR_NONE) {
return error;
}
- const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
CHECK(f != nullptr);
mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
- soa.Self()->EndAssertNoThreadSuspension(old_cause);
std::string thread_group_name(s->ToModifiedUtf8());
expandBufAddUtf8String(pReply, thread_group_name);
@@ -2077,14 +2075,15 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::
if (error != JDWP::ERR_NONE) {
return error;
}
- const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
- mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
- CHECK(c != nullptr);
- mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
- CHECK(f != nullptr);
- mirror::Object* parent = f->GetObject(thread_group);
- soa.Self()->EndAssertNoThreadSuspension(old_cause);
-
+ mirror::Object* parent;
+ {
+ ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
+ mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
+ CHECK(c != nullptr);
+ mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
+ CHECK(f != nullptr);
+ parent = f->GetObject(thread_group);
+ }
JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
expandBufAddObjectId(pReply, parent_group_id);
return JDWP::ERR_NONE;
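
Note the shape of the GetThreadGroupParent rewrite above: unlike the two hunks before it, the guard is wrapped in an inner block and the result is hoisted out, so the no-suspension scope ends before gRegistry->Add(parent) runs (Add can allocate, and allocation may suspend). A minimal sketch of the idiom, using std::lock_guard as a stand-in for any RAII scope (not the real ART guard):

    #include <mutex>

    std::mutex object_lock;
    int guarded_value = 42;

    int ReadThenPublish() {
      int parent;
      {
        std::lock_guard<std::mutex> scope(object_lock);  // must end before publishing
        parent = guarded_value;
      }  // scope released here, on all paths
      // Anything that may block, allocate, or suspend belongs out here.
      return parent;
    }
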
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b744a62..864bb72 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -685,9 +685,8 @@ void Heap::CreateThreadPool() {
}
void Heap::VisitObjects(ObjectCallback callback, void* arg) {
- Thread* self = Thread::Current();
// GCs can move objects, so don't allow this.
- const char* old_cause = self->StartAssertNoThreadSuspension("Visiting objects");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "Visiting objects");
if (bump_pointer_space_ != nullptr) {
// Visit objects in bump pointer space.
bump_pointer_space_->Walk(callback, arg);
@@ -704,7 +703,6 @@ void Heap::VisitObjects(ObjectCallback callback, void* arg) {
}
}
GetLiveBitmap()->Walk(callback, arg);
- self->EndAssertNoThreadSuspension(old_cause);
}
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
@@ -1429,12 +1427,10 @@ class InstanceCounter {
void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
uint64_t* counts) {
// Can't do any GC in this function since this may move classes.
- Thread* self = Thread::Current();
- auto* old_cause = self->StartAssertNoThreadSuspension("CountInstances");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "CountInstances");
InstanceCounter counter(classes, use_is_assignable_from, counts);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
VisitObjects(InstanceCounter::Callback, &counter);
- self->EndAssertNoThreadSuspension(old_cause);
}
class InstanceCollector {
@@ -1447,8 +1443,7 @@ class InstanceCollector {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
- mirror::Class* instance_class = obj->GetClass();
- if (instance_class == instance_collector->class_) {
+ if (obj->GetClass() == instance_collector->class_) {
if (instance_collector->max_count_ == 0 ||
instance_collector->instances_.size() < instance_collector->max_count_) {
instance_collector->instances_.push_back(obj);
@@ -1457,8 +1452,8 @@ class InstanceCollector {
}
private:
- mirror::Class* class_;
- uint32_t max_count_;
+ const mirror::Class* const class_;
+ const uint32_t max_count_;
std::vector<mirror::Object*>& instances_;
DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
};
@@ -1466,12 +1461,10 @@ class InstanceCollector {
void Heap::GetInstances(mirror::Class* c, int32_t max_count,
std::vector<mirror::Object*>& instances) {
// Can't do any GC in this function since this may move classes.
- Thread* self = Thread::Current();
- auto* old_cause = self->StartAssertNoThreadSuspension("GetInstances");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "GetInstances");
InstanceCollector collector(c, max_count, instances);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
VisitObjects(&InstanceCollector::Callback, &collector);
- self->EndAssertNoThreadSuspension(old_cause);
}
class ReferringObjectsFinder {
@@ -1504,8 +1497,8 @@ class ReferringObjectsFinder {
}
private:
- mirror::Object* object_;
- uint32_t max_count_;
+ const mirror::Object* const object_;
+ const uint32_t max_count_;
std::vector<mirror::Object*>& referring_objects_;
DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
};
@@ -1513,12 +1506,10 @@ class ReferringObjectsFinder {
void Heap::GetReferringObjects(mirror::Object* o, int32_t max_count,
std::vector<mirror::Object*>& referring_objects) {
// Can't do any GC in this function since this may move the object o.
- Thread* self = Thread::Current();
- auto* old_cause = self->StartAssertNoThreadSuspension("GetReferringObjects");
+ ScopedAssertNoThreadSuspension ants(Thread::Current(), "GetReferringObjects");
ReferringObjectsFinder finder(o, max_count, referring_objects);
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(ants.Self(), *Locks::heap_bitmap_lock_);
VisitObjects(&ReferringObjectsFinder::Callback, &finder);
- self->EndAssertNoThreadSuspension(old_cause);
}
void Heap::CollectGarbage(bool clear_soft_references) {
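
The three heap hunks above make the same pair of changes: the no-suspension assertion becomes a scoped guard, and the exclusive WriterMutexLock on heap_bitmap_lock_ is downgraded to a ReaderMutexLock, since counting or collecting instances only reads the live bitmap. ART's Reader/WriterMutexLock are RAII guards; a rough standard-library analogue of the downgrade, using std::shared_mutex rather than the real ART lock types:

    #include <shared_mutex>
    #include <vector>

    std::shared_mutex heap_bitmap_lock;
    std::vector<int> live_objects;  // stand-in for the live bitmap

    // Before: exclusive lock, even though the walk only reads.
    size_t CountExclusive() {
      std::unique_lock<std::shared_mutex> mu(heap_bitmap_lock);
      return live_objects.size();
    }

    // After: a shared lock lets concurrent readers walk in parallel.
    size_t CountShared() {
      std::shared_lock<std::shared_mutex> mu(heap_bitmap_lock);
      return live_objects.size();
    }
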
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 7e685e8..cd8cce0 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -316,35 +316,7 @@ enum InterpreterImplKind {
kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
-#if !defined(__clang__)
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
-#else
-// Clang 3.4 fails to build the goto interpreter implementation.
-static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
-template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
- LOG(FATAL) << "UNREACHABLE";
- exit(0);
-}
-// Explicit definitions of ExecuteGotoImpl.
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, false>(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, false>(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, true>(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, true>(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-#endif
static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register)
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 003815e..65a7919 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -292,7 +292,7 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char*
std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
false, &error_msg));
if (oat_file.get() == nullptr) {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
<< "' for file location '" << filename << "': " << error_msg;
}
@@ -319,13 +319,13 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char*
return kUpToDate;
} else if (should_relocate_if_possible &&
ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
<< " needs to be relocated for " << filename;
}
return kPatchoatNeeded;
} else {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
<< " is out of date for " << filename;
}
@@ -343,13 +343,13 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char*
} else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
&& should_relocate_if_possible
&& ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
<< " needs to be relocated for " << filename;
}
return kPatchoatNeeded;
} else {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
<< " is out of date for " << filename;
}
@@ -357,7 +357,7 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char*
}
}
} else {
- if (kVerboseLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
<< " does not contain " << filename;
}
@@ -367,9 +367,10 @@ static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char*
static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
const char* pkgname, const char* instruction_set, const jboolean defer) {
- // TODO disable this logging.
- const bool kVerboseLogging = false; // Spammy logging.
- const bool kReasonLogging = true; // Logging of reason for returning JNI_TRUE.
+ // Spammy logging for the kUpToDate case.
+ const bool kVerboseLogging = false;
+ // Logging of reason for returning kDexoptNeeded or kPatchoatNeeded.
+ const bool kReasonLogging = true;
if ((filename == nullptr) || !OS::FileExists(filename)) {
LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename << "' does not exist";
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index c3c8c25..e469126 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -111,15 +111,17 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, ji
thread->InitAfterFork();
EnableDebugFeatures(debug_flags);
- Runtime::NativeBridgeAction action = Runtime::NativeBridgeAction::kUnload;
if (instruction_set != nullptr) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
+ Runtime::NativeBridgeAction action = Runtime::NativeBridgeAction::kUnload;
if (isa != kNone && isa != kRuntimeISA) {
action = Runtime::NativeBridgeAction::kInitialize;
}
+ Runtime::Current()->DidForkFromZygote(env, action, isa_string.c_str());
+ } else {
+ Runtime::Current()->DidForkFromZygote(env, Runtime::NativeBridgeAction::kUnload, nullptr);
}
- Runtime::Current()->DidForkFromZygote(action);
}
static JNINativeMethod gMethods[] = {
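
The control-flow change above ensures DidForkFromZygote is called exactly once on both branches, and picks kInitialize only when the app requests an ISA different from the runtime's. A simplified sketch of that decision (a string comparison stands in for the real InstructionSet enum test, and nullptr for kNone):

    #include <cstring>

    enum class NativeBridgeAction { kUnload, kInitialize };

    NativeBridgeAction ChooseAction(const char* app_isa, const char* runtime_isa) {
      if (app_isa == nullptr) {
        return NativeBridgeAction::kUnload;  // no ISA requested: no bridge needed
      }
      if (std::strcmp(app_isa, runtime_isa) == 0) {
        return NativeBridgeAction::kUnload;  // native ISA: run directly
      }
      return NativeBridgeAction::kInitialize;  // foreign ISA: bring up the bridge
    }
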
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 453c92f..fcd11ed 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -16,6 +16,9 @@
#include "native_bridge_art_interface.h"
+#include "nativebridge/native_bridge.h"
+
+#include "base/logging.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
@@ -91,4 +94,40 @@ uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
return count;
}
+// Native bridge library runtime callbacks. They represent the runtime interface to the native
+// bridge.
+//
+// The interface is expected to expose the following methods:
+// getMethodShorty(): when native code calls the JNI function CallXXXXMethodY(), the native
+// bridge calls back into the VM for the method's shorty so that it can marshal arguments
+// according to the host calling convention.
+// getNativeMethodCount() and getNativeMethods(): for the JNI function UnregisterNatives(), the
+// native bridge can call back to retrieve all native methods of the specified class so that
+// all of the corresponding trampolines can be destroyed.
+static android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_ {
+ GetMethodShorty, GetNativeMethodCount, GetNativeMethods
+};
+
+void LoadNativeBridge(std::string& native_bridge_library_filename) {
+ android::LoadNativeBridge(native_bridge_library_filename.c_str(), &native_bridge_art_callbacks_);
+ VLOG(startup) << "Runtime::Setup native bridge library: "
+ << (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename);
+}
+
+void PreInitializeNativeBridge(std::string dir) {
+ VLOG(startup) << "Runtime::Pre-initialize native bridge";
+ if (unshare(CLONE_NEWNS) == -1) {
+ LOG(WARNING) << "Could not create mount namespace.";
+ return;
+ }
+ android::PreInitializeNativeBridge(dir.c_str(), GetInstructionSetString(kRuntimeISA));
+}
+
+void InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
+ android::InitializeNativeBridge(env, instruction_set);
+}
+
+void UnloadNativeBridge() {
+ android::UnloadNativeBridge();
+}
+
}; // namespace art
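
For context, here is how a bridge library might consume the three callbacks when handling UnregisterNatives. The struct layout and the DestroyTrampoline helper are hypothetical, inferred from the signatures above rather than taken from the real libnativebridge headers:

    #include <jni.h>
    #include <cstdint>
    #include <vector>

    // Assumed mirror of the callback table; field names are illustrative only.
    struct RuntimeCallbacks {
      const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
      uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
      uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz,
                                   JNINativeMethod* methods, uint32_t method_count);
    };

    void OnUnregisterNatives(const RuntimeCallbacks* cbs, JNIEnv* env, jclass clazz) {
      // Ask the VM for the class's native methods, then tear down the
      // trampoline the bridge created for each of them.
      uint32_t count = cbs->getNativeMethodCount(env, clazz);
      std::vector<JNINativeMethod> methods(count);
      count = cbs->getNativeMethods(env, clazz, methods.data(), count);
      for (uint32_t i = 0; i < count; ++i) {
        // DestroyTrampoline(methods[i].fnPtr);  // hypothetical bridge-internal helper
      }
    }
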
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index 08735c8..42f0ed2 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -19,15 +19,21 @@
#include <jni.h>
#include <stdint.h>
+#include <string>
namespace art {
-const char* GetMethodShorty(JNIEnv* env, jmethodID mid);
+// Mirrors the libnativebridge interface. This keeps the ART callbacks out of line and avoids
+// requiring the system/core header file in other files.
-uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz);
+void LoadNativeBridge(std::string& native_bridge_library_filename);
-uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count);
+// This is mostly for testing purposes; in a full system, Zygote code calls this.
+void PreInitializeNativeBridge(std::string dir);
+
+void InitializeNativeBridge(JNIEnv* env, const char* instruction_set);
+
+void UnloadNativeBridge();
}; // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8386cc0..3432aa8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -144,8 +144,7 @@ Runtime::Runtime()
target_sdk_version_(0),
implicit_null_checks_(false),
implicit_so_checks_(false),
- implicit_suspend_checks_(false),
- native_bridge_art_callbacks_({GetMethodShorty, GetNativeMethodCount, GetNativeMethods}) {
+ implicit_suspend_checks_(false) {
}
Runtime::~Runtime() {
@@ -419,18 +418,23 @@ bool Runtime::Start() {
Thread::FinishStartup();
+ system_class_loader_ = CreateSystemClassLoader();
+
if (is_zygote_) {
if (!InitZygote()) {
return false;
}
} else {
- DidForkFromZygote(NativeBridgeAction::kInitialize);
+ bool have_native_bridge = !native_bridge_library_filename_.empty();
+ if (have_native_bridge) {
+ PreInitializeNativeBridge(".");
+ }
+ DidForkFromZygote(self->GetJniEnv(), have_native_bridge ? NativeBridgeAction::kInitialize :
+ NativeBridgeAction::kUnload, GetInstructionSetString(kRuntimeISA));
}
StartDaemonThreads();
- system_class_loader_ = CreateSystemClassLoader();
-
{
ScopedObjectAccess soa(self);
self->GetJniEnv()->locals.AssertEmpty();
@@ -502,16 +506,16 @@ bool Runtime::InitZygote() {
#endif
}
-void Runtime::DidForkFromZygote(NativeBridgeAction action) {
+void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa) {
is_zygote_ = false;
switch (action) {
case NativeBridgeAction::kUnload:
- android::UnloadNativeBridge();
+ UnloadNativeBridge();
break;
case NativeBridgeAction::kInitialize:
- android::InitializeNativeBridge();
+ InitializeNativeBridge(env, isa);
break;
}
@@ -878,10 +882,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
// DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
// No-op wrt native bridge.
native_bridge_library_filename_ = options->native_bridge_library_filename_;
- android::LoadNativeBridge(native_bridge_library_filename_.c_str(), &native_bridge_art_callbacks_);
- VLOG(startup) << "Runtime::Setup native bridge library: "
- << (native_bridge_library_filename_.empty() ?
- "(empty)" : native_bridge_library_filename_);
+ LoadNativeBridge(native_bridge_library_filename_);
VLOG(startup) << "Runtime::Init exiting";
return true;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f9c017b..84e40ad 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -32,7 +32,6 @@
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
-#include "nativebridge/native_bridge.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
@@ -398,7 +397,7 @@ class Runtime {
};
void PreZygoteFork();
bool InitZygote();
- void DidForkFromZygote(NativeBridgeAction action);
+ void DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa);
const instrumentation::Instrumentation* GetInstrumentation() const {
return &instrumentation_;
@@ -648,17 +647,6 @@ class Runtime {
// the native bridge to load it and then gets the trampoline for the entry to native activity.
std::string native_bridge_library_filename_;
- // Native bridge library runtime callbacks. They represent the runtime interface to native bridge.
- //
- // The interface is expected to expose the following methods:
- // getMethodShorty(): in the case of native method calling JNI native function CallXXXXMethodY(),
- // native bridge calls back to VM for the shorty of the method so that it can prepare based on
- // host calling convention.
- // getNativeMethodCount() and getNativeMethods(): in case of JNI function UnregisterNatives(),
- // native bridge can call back to get all native methods of specified class so that all
- // corresponding trampolines can be destroyed.
- android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_;
-
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/thread.h b/runtime/thread.h
index d96b50b..164eb86 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1180,6 +1180,23 @@ class Thread {
DISALLOW_COPY_AND_ASSIGN(Thread);
};
+class ScopedAssertNoThreadSuspension {
+ public:
+ ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
+ : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
+ }
+ ~ScopedAssertNoThreadSuspension() {
+ self_->EndAssertNoThreadSuspension(old_cause_);
+ }
+ Thread* Self() {
+ return self_;
+ }
+
+ private:
+ Thread* const self_;
+ const char* old_cause_;
+};
+
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);
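
This RAII guard is what replaces all of the manual Start/End pairs earlier in the change. Its benefit is that the destructor restores the previous cause on every path, including early returns like the one in LookupClassFromImage, where the old code needed an extra EndAssertNoThreadSuspension before `return klass`. A self-contained sketch with stand-in types (Thread here is a toy, not the real ART class):

    #include <cassert>

    struct Thread {  // toy stand-in exposing just the assertion API
      const char* cause = nullptr;
      const char* StartAssertNoThreadSuspension(const char* c) {
        const char* old = cause;
        cause = c;
        return old;
      }
      void EndAssertNoThreadSuspension(const char* old) { cause = old; }
    };

    class ScopedAssertNoThreadSuspension {
     public:
      ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
          : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {}
      ~ScopedAssertNoThreadSuspension() { self_->EndAssertNoThreadSuspension(old_cause_); }
      Thread* Self() { return self_; }
     private:
      Thread* const self_;
      const char* old_cause_;
    };

    int Lookup(Thread* self, bool found_early) {
      ScopedAssertNoThreadSuspension ants(self, "Image class lookup");
      if (found_early) return 1;  // destructor still restores the old cause
      return 0;
    }

    int main() {
      Thread t;
      Lookup(&t, true);
      assert(t.cause == nullptr);  // restored on the early-return path
    }
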
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f28d488..45e8b6a 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -650,6 +650,11 @@ bool MethodVerifier::ScanTryCatchBlocks() {
<< "exception handler starts at bad address (" << dex_pc << ")";
return false;
}
+ if (!CheckNotMoveResult(code_item_->insns_, dex_pc)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "exception handler begins with move-result* (" << dex_pc << ")";
+ return false;
+ }
insn_flags_[dex_pc].SetBranchTarget();
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
@@ -2766,7 +2771,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
return false;
}
DCHECK_EQ(isConditional, (opcode_flags & Instruction::kContinue) != 0);
- if (!CheckNotMoveException(code_item_->insns_, work_insn_idx_ + branch_target)) {
+ if (!CheckNotMoveExceptionOrMoveResult(code_item_->insns_, work_insn_idx_ + branch_target)) {
return false;
}
/* update branch target, set "changed" if appropriate */
@@ -2812,7 +2817,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
(((int32_t) switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
abs_offset = work_insn_idx_ + offset;
DCHECK_LT(abs_offset, code_item_->insns_size_in_code_units_);
- if (!CheckNotMoveException(code_item_->insns_, abs_offset)) {
+ if (!CheckNotMoveExceptionOrMoveResult(code_item_->insns_, abs_offset)) {
return false;
}
if (!UpdateRegisters(abs_offset, work_line_.get(), false)) {
@@ -4001,6 +4006,19 @@ bool MethodVerifier::CheckNotMoveException(const uint16_t* insns, int insn_idx)
return true;
}
+bool MethodVerifier::CheckNotMoveResult(const uint16_t* insns, int insn_idx) {
+ if (((insns[insn_idx] & 0xff) >= Instruction::MOVE_RESULT) &&
+ ((insns[insn_idx] & 0xff) <= Instruction::MOVE_RESULT_OBJECT)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid use of move-result*";
+ return false;
+ }
+ return true;
+}
+
+bool MethodVerifier::CheckNotMoveExceptionOrMoveResult(const uint16_t* insns, int insn_idx) {
+ return (CheckNotMoveException(insns, insn_idx) && CheckNotMoveResult(insns, insn_idx));
+}
+
bool MethodVerifier::UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line,
bool update_merge_line) {
bool changed = true;
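
The single range comparison in CheckNotMoveResult works because the move-result family occupies a contiguous opcode range in the Dalvik instruction set (move-result 0x0a, move-result-wide 0x0b, move-result-object 0x0c, with move-exception at 0x0d), and the opcode is the low byte of each 16-bit code unit. A standalone sketch of the test:

    #include <cassert>
    #include <cstdint>

    // Dalvik opcode values; the move-result family is contiguous.
    constexpr uint8_t kMoveResult = 0x0a;
    constexpr uint8_t kMoveResultObject = 0x0c;

    bool IsMoveResult(const uint16_t* insns, int insn_idx) {
      uint8_t opcode = insns[insn_idx] & 0xff;  // low byte: opcode; high byte: vAA
      return opcode >= kMoveResult && opcode <= kMoveResultObject;
    }

    int main() {
      const uint16_t code[] = {0x010a, 0x0000};  // move-result v1 ; nop
      assert(IsMoveResult(code, 0));
      assert(!IsMoveResult(code, 1));
    }
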
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 87acb20..9f5efe8 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -612,6 +612,21 @@ class MethodVerifier {
bool CheckNotMoveException(const uint16_t* insns, int insn_idx);
/*
+ * Verify that the target instruction is not "move-result". Branching to a move-result
+ * instruction must be rejected, but this has to be a distinct check rather than part of
+ * CheckNotMoveException, because it is legal to continue into a "move-result" instruction
+ * as long as the previous instruction was an invoke (which is checked elsewhere).
+ */
+ bool CheckNotMoveResult(const uint16_t* insns, int insn_idx);
+
+ /*
+ * Verify that the target instruction is not "move-result" or "move-exception". This is to
+ * be used when checking branch and switch instructions, but not instructions that can
+ * continue.
+ */
+ bool CheckNotMoveExceptionOrMoveResult(const uint16_t* insns, int insn_idx);
+
+ /*
* Control can transfer to "next_insn". Merge the registers from merge_line into the table at
* next_insn, and set the changed flag on the target address if any of the registers were changed.
* In the case of fall-through, update the merge line on a change as its the working line for the
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index d429dfd..34d6caa 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -340,6 +340,8 @@ class PrimitiveType : public RegType {
public:
PrimitiveType(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool HasClassVirtual() const OVERRIDE { return true; }
};
class Cat1Type : public PrimitiveType {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bffec4b..fb0616d 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -288,6 +288,7 @@ const Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descrip
if (!descriptor.empty()) {
klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
descriptor.c_str());
+ DCHECK(klass != nullptr);
}
const Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
RegTypeCache::primitive_count_++;
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index aad3b5a..2fecc8b 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -112,6 +112,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(bool_reg_type.IsDoubleTypes());
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(bool_reg_type.HasClass());
const RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
@@ -144,6 +145,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(byte_reg_type.IsDoubleTypes());
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(byte_reg_type.HasClass());
const RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
@@ -176,6 +178,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(char_reg_type.IsDoubleTypes());
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(char_reg_type.HasClass());
const RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
@@ -208,6 +211,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(short_reg_type.IsDoubleTypes());
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(short_reg_type.HasClass());
const RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
@@ -240,6 +244,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(int_reg_type.IsDoubleTypes());
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(int_reg_type.HasClass());
const RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
@@ -272,6 +277,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(long_reg_type.IsDoubleTypes());
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(long_reg_type.HasClass());
const RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
@@ -304,6 +310,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(float_reg_type.IsDoubleTypes());
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(float_reg_type.HasClass());
const RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
@@ -336,6 +343,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(double_reg_type.IsDoubleTypes());
EXPECT_FALSE(double_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(double_reg_type.IsNonZeroReferenceTypes());
+ EXPECT_TRUE(double_reg_type.HasClass());
}
class RegTypeReferenceTest : public CommonRuntimeTest {};
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index 808d968..a5eedc6 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -1,4 +1,5 @@
Native bridge initialized.
+Checking for getEnvValues.
Ready for native bridge tests.
Checking for support.
Getting trampoline for JNI_OnLoad with shorty (null).
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 3acc643..442f99c 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -207,7 +207,8 @@ static NativeBridgeMethod* find_native_bridge_method(const char *name) {
}
// NativeBridgeCallbacks implementations
-extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs) {
+extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs,
+ const char* private_dir, const char* isa) {
if (art_cbs != nullptr) {
gNativeBridgeArtCallbacks = art_cbs;
printf("Native bridge initialized.\n");
@@ -263,11 +264,49 @@ extern "C" bool native_bridge_isSupported(const char* libpath) {
return strcmp(libpath, "libjavacore.so") != 0;
}
+namespace android {
+
+// Environment values required by the apps running with native bridge.
+struct NativeBridgeRuntimeValues {
+ const char* os_arch;
+ const char* cpu_abi;
+ const char* cpu_abi2;
+ const char** supported_abis;
+ int32_t abi_count;
+};
+
+} // namespace android
+
+const char* supported_abis[] = {
+ "supported1", "supported2", "supported3"
+};
+
+const struct android::NativeBridgeRuntimeValues nb_env {
+ .os_arch = "os.arch",
+ .cpu_abi = "cpu_abi",
+ .cpu_abi2 = "cpu_abi2",
+ .supported_abis = supported_abis,
+ .abi_count = 3
+};
+
+extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge_getAppEnv(
+ const char* abi) {
+ printf("Checking for getEnvValues.\n");
+
+ if (abi == nullptr) {
+ return nullptr;
+ }
+
+ return &nb_env;
+}
+
// "NativeBridgeItf" is effectively an API (it is the name of the symbol that will be loaded
// by the native bridge library).
android::NativeBridgeCallbacks NativeBridgeItf {
+ .version = 1,
.initialize = &native_bridge_initialize,
.loadLibrary = &native_bridge_loadLibrary,
.getTrampoline = &native_bridge_getTrampoline,
- .isSupported = &native_bridge_isSupported
+ .isSupported = &native_bridge_isSupported,
+ .getAppEnv = &native_bridge_getAppEnv
};
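
Setting .version = 1 in the callbacks table is what allows a loader to gate access to entry points that older tables lack, such as getAppEnv. A hypothetical loader-side check (the struct shape follows the test code above, not necessarily the real libnativebridge definition):

    #include <cstdint>

    struct Callbacks {  // assumed shape: version first, then entry points
      uint32_t version;
      const void* (*getAppEnv)(const char* abi);  // present from version 1 on
    };

    const void* QueryAppEnv(const Callbacks* cb, const char* abi) {
      // Only dereference fields the loaded library actually provides.
      if (cb == nullptr || cb->version < 1 || cb->getAppEnv == nullptr) {
        return nullptr;
      }
      return cb->getAppEnv(abi);
    }
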
diff --git a/test/409-materialized-condition/expected.txt b/test/409-materialized-condition/expected.txt
new file mode 100644
index 0000000..a0796cd
--- /dev/null
+++ b/test/409-materialized-condition/expected.txt
@@ -0,0 +1,5 @@
+foo1
+In do nothing.
+In if.
+foo2
+In do nothing.
diff --git a/test/409-materialized-condition/info.txt b/test/409-materialized-condition/info.txt
new file mode 100644
index 0000000..898560d
--- /dev/null
+++ b/test/409-materialized-condition/info.txt
@@ -0,0 +1 @@
+Test that materialized conditions are evaluated correctly.
diff --git a/test/409-materialized-condition/src/Main.java b/test/409-materialized-condition/src/Main.java
new file mode 100644
index 0000000..0c179a9
--- /dev/null
+++ b/test/409-materialized-condition/src/Main.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void doNothing(boolean b) {
+ System.out.println("In do nothing.");
+ }
+
+ public static void inIf() {
+ System.out.println("In if.");
+ }
+
+ public static int bar() {
+ return 42;
+ }
+
+ public static int foo1() {
+ int b = bar();
+ doNothing(b == 42);
+ // This second `b == 42` will be GVN'ed away.
+ if (b == 42) {
+ inIf();
+ return b;
+ }
+ return 0;
+ }
+
+ public static int foo2() {
+ int b = bar();
+ doNothing(b == 41);
+ // This second `b == 41` will be GVN'ed away.
+ if (b == 41) {
+ inIf();
+ return 0;
+ }
+ return b;
+ }
+
+ public static void main(String[] args) {
+ System.out.println("foo1");
+ int res = foo1();
+ if (res != 42) {
+ throw new Error("Unexpected return value for foo1: " + res + ", expected 42.");
+ }
+
+ System.out.println("foo2");
+ res = foo2();
+ if (res != 42) {
+ throw new Error("Unexpected return value for foo2: " + res + ", expected 42.");
+ }
+ }
+}