-rw-r--r--  compiler/dex/mir_analysis.cc                               |   5
-rw-r--r--  compiler/dex/mir_graph.cc                                  |  17
-rw-r--r--  compiler/dex/mir_graph.h                                   |   6
-rw-r--r--  runtime/dex_file-inl.h                                     |   1
-rw-r--r--  runtime/dex_file.h                                         |   1
-rw-r--r--  runtime/dex_instruction.h                                  |   5
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc         |   2
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  |  18
-rw-r--r--  runtime/gc/accounting/atomic_stack.h                       |   4
-rw-r--r--  runtime/gc/collector/semi_space.cc                         |  30
-rw-r--r--  runtime/gc/heap-inl.h                                      |   2
-rw-r--r--  runtime/gc/heap.cc                                         | 124
-rw-r--r--  runtime/gc/heap.h                                          |  18
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc                     |   8
-rw-r--r--  runtime/gc/space/bump_pointer_space.h                      |   7
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc                         |   5
-rw-r--r--  runtime/gc/space/dlmalloc_space.h                          |   1
-rw-r--r--  runtime/gc/space/rosalloc_space.cc                         |   5
-rw-r--r--  runtime/gc/space/rosalloc_space.h                          |   1
-rw-r--r--  runtime/gc/space/space.h                                   |   3
-rw-r--r--  runtime/gc/space/zygote_space.cc                           |   4
-rw-r--r--  runtime/gc/space/zygote_space.h                            |   3
-rw-r--r--  runtime/jni_internal.cc                                    |   1
-rw-r--r--  runtime/mirror/array-inl.h                                 |   1
-rw-r--r--  runtime/runtime.cc                                         |   7
-rw-r--r--  runtime/runtime.h                                          |  11
-rw-r--r--  runtime/runtime_android.cc                                 |  50
-rw-r--r--  runtime/runtime_linux.cc                                   |  10
-rw-r--r--  runtime/thread.h                                           |   7
-rw-r--r--  runtime/utils.cc                                           |   4
-rw-r--r--  runtime/utils.h                                            |   1
-rw-r--r--  runtime/verifier/method_verifier.cc                        |   1
-rw-r--r--  runtime/zip_archive.h                                      |   1
33 files changed, 311 insertions(+), 53 deletions(-)
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 7ce8f69..8ef80fa 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1004,6 +1004,11 @@ bool MIRGraph::SkipCompilation() {
return false;
}
+ // Contains a pattern we don't want to compile?
+ if (punt_to_interpreter_) {
+ return true;
+ }
+
if (compiler_filter == CompilerOptions::kInterpretOnly) {
LOG(WARNING) << "InterpretOnly should ideally be filtered out prior to parsing.";
return true;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e4550d1..2bfc154 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,7 +86,8 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
forward_branches_(0),
compiler_temps_(arena, 6, kGrowableArrayMisc),
num_non_special_compiler_temps_(0),
- max_available_non_special_compiler_temps_(0) {
+ max_available_non_special_compiler_temps_(0),
+ punt_to_interpreter_(false) {
try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
- std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -610,6 +611,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+ int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
uint64_t df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
@@ -676,6 +678,19 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
} else if (flags & Instruction::kSwitch) {
cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
}
+ if (verify_flags & Instruction::kVerifyVarArgRange) {
+ /*
+ * The Quick backend's runtime model includes a gap between a method's
+ * argument ("in") vregs and the rest of its vregs. Handling a range instruction
+ * which spans the gap is somewhat complicated, and should not happen
+ * in normal usage of dx. Punt to the interpreter.
+ */
+ int first_reg_in_range = insn->dalvikInsn.vC;
+ int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
+ if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
+ punt_to_interpreter_ = true;
+ }
+ }
current_offset_ += width;
BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
false, /* immed_pred_block_p */ NULL);
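As an aside, here is a minimal standalone sketch of the gap check introduced above, with made-up frame sizes (none of this is ART code): argument ("in") vregs sit at the top of the frame, so a range whose registers straddle the locals/ins boundary is exactly the case being punted.

```cpp
#include <cstdio>

// Standalone model: in a Dalvik frame the argument ("in") vregs occupy the
// highest register numbers, so a vreg is an "in" iff vreg >= num_regs,
// where num_regs counts the non-argument registers.
static bool IsInVReg(int vreg, int num_regs) { return vreg >= num_regs; }

// True when an invoke-range covering [vC, vC + vA - 1] straddles the
// boundary between locals and ins, the case punted to the interpreter.
static bool RangeSpansGap(int vC, int vA, int num_regs) {
  int first_reg_in_range = vC;
  int last_reg_in_range = vC + vA - 1;
  return IsInVReg(first_reg_in_range, num_regs) !=
         IsInVReg(last_reg_in_range, num_regs);
}

int main() {
  // 10 locals (v0..v9) and 4 ins (v10..v13): a range v8..v11 spans the gap.
  std::printf("spans gap: %d\n",
              RangeSpansGap(/*vC=*/8, /*vA=*/4, /*num_regs=*/10));  // 1
  return 0;
}
```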
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d344055..28e9470 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -684,6 +684,11 @@ class MIRGraph {
return opcode >= static_cast<int>(kMirOpFirst);
}
+ // Is this vreg in the in set?
+ bool IsInVReg(int vreg) {
+ return (vreg >= cu_->num_regs);
+ }
+
void DumpCheckStats();
MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
int SRegToVReg(int ssa_reg) const;
@@ -917,6 +922,7 @@ class MIRGraph {
size_t num_non_special_compiler_temps_;
size_t max_available_non_special_compiler_temps_;
size_t max_available_special_compiler_temps_;
+ bool punt_to_interpreter_; // Difficult or not worthwhile - just interpret.
friend class LocalValueNumberingTest;
};
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index a459308..e095c48 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_DEX_FILE_INL_H_
#include "base/logging.h"
+#include "base/stringpiece.h"
#include "dex_file.h"
#include "leb128.h"
#include "utils.h"
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index e9d18b5..70baeed 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -22,7 +22,6 @@
#include "base/logging.h"
#include "base/mutex.h"
-#include "base/stringpiece.h"
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index c434cdd..4352c4a 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -422,6 +422,11 @@ class Instruction {
return kInstructionFlags[opcode];
}
+ // Return the verify flags for the given opcode.
+ static int VerifyFlagsOf(Code opcode) {
+ return kInstructionVerifyFlags[opcode];
+ }
+
// Returns true if this instruction is a branch.
bool IsBranch() const {
return (kInstructionFlags[Opcode()] & kBranch) != 0;
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 59da7a0..737fa3e 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -92,6 +92,7 @@ extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_
}
CheckReferenceResult(o, self);
}
+ VerifyObject(o);
return o;
}
@@ -109,6 +110,7 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
}
CheckReferenceResult(o, self);
}
+ VerifyObject(o);
return o;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7cbeb29..5339b5e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -756,21 +756,25 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
thread->EndAssertNoThreadSuspension(old_cause);
+ bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
// Resolve method filling in dex cache.
if (called->IsRuntimeMethod()) {
+ SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ receiver = sirt_receiver.get();
}
const void* code = NULL;
if (LIKELY(!thread->IsExceptionPending())) {
// Incompatible class change should have been handled in resolve method.
CHECK(!called->CheckIncompatibleClassChange(invoke_type));
- // Refine called method based on receiver.
- if (invoke_type == kVirtual) {
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
- } else if (invoke_type == kInterface) {
- called = receiver->GetClass()->FindVirtualMethodForInterface(called);
- }
- if ((invoke_type == kVirtual) || (invoke_type == kInterface)) {
+ if (virtual_or_interface) {
+ // Refine called method based on receiver.
+ CHECK(receiver != nullptr) << invoke_type;
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
// We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
// of the sharpened method.
if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
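The SirtRef wrapping above exists because ResolveMethod can allocate, and an allocation may trigger a moving GC that relocates the receiver; registering the reference as a root lets the collector rewrite it. A toy illustration of that root-update pattern, with all names hypothetical rather than ART's API:

```cpp
#include <cstdio>
#include <vector>

struct Object { int value; };

// Toy root table standing in for ART's SirtRef (all names hypothetical):
// roots registered here get rewritten when the "collector" moves objects.
static std::vector<Object**> g_roots;

struct ScopedRoot {
  explicit ScopedRoot(Object** slot) { g_roots.push_back(slot); }
  ~ScopedRoot() { g_roots.pop_back(); }
};

// Pretend collector: copies every rooted object and updates the roots.
static void MovingGc(std::vector<Object>& to_space) {
  for (Object** slot : g_roots) {
    to_space.push_back(**slot);  // copy the object to its new home
    *slot = &to_space.back();    // update the root in place
  }
}

int main() {
  std::vector<Object> to_space;
  to_space.reserve(4);  // keep element pointers stable for the demo
  Object original{42};
  Object* receiver = &original;
  {
    ScopedRoot root(&receiver);  // like SirtRef<mirror::Object> above
    MovingGc(to_space);          // like ResolveMethod triggering a GC
  }
  // receiver now points at the moved copy, like sirt_receiver.get().
  std::printf("%d %s\n", receiver->value,
              receiver == &original ? "(old)" : "(moved)");
  return 0;
}
```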
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index d6f3228..c79b586 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
+#include <algorithm>
#include <string>
#include "atomic.h"
@@ -94,7 +95,8 @@ class AtomicStack {
if (kIsDebugBuild) {
// Sanity check that the memory is zero.
for (int32_t i = index; i < new_index; ++i) {
- DCHECK_EQ(begin_[i], static_cast<T>(0)) << "i=" << i << " index=" << index << " new_index=" << new_index;
+ DCHECK_EQ(begin_[i], static_cast<T>(0))
+ << "i=" << i << " index=" << index << " new_index=" << new_index;
}
}
return true;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a4c9dea..4668a19 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -61,7 +61,8 @@ namespace gc {
namespace collector {
static constexpr bool kProtectFromSpace = true;
-static constexpr bool kResetFromSpace = true;
+static constexpr bool kClearFromSpace = true;
+static constexpr bool kStoreStackTraces = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -169,6 +170,19 @@ void SemiSpace::ProcessReferences(Thread* self) {
}
void SemiSpace::MarkingPhase() {
+ if (kStoreStackTraces) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self_);
+ // Store the stack traces into the runtime fault string in case we get a heap corruption
+ // related crash later.
+ ThreadState old_state = self_->SetStateUnsafe(kRunnable);
+ std::ostringstream oss;
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->DumpForSigQuit(oss);
+ runtime->GetThreadList()->DumpNativeStacks(oss);
+ runtime->SetFaultMessage(oss.str());
+ CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
+ }
+
if (generational_) {
if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
clear_soft_references_) {
@@ -353,19 +367,17 @@ void SemiSpace::ReclaimPhase() {
TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
GetHeap()->UnBindBitmaps();
}
- // Release the memory used by the from space.
- if (kResetFromSpace) {
- // Clearing from space.
+ if (kClearFromSpace) {
+ // Release the memory used by the from space.
from_space_->Clear();
}
+ from_space_->Reset();
// Protect the from space.
- VLOG(heap)
- << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
- << reinterpret_cast<void*>(from_space_->Limit());
+ VLOG(heap) << "Protecting space " << *from_space_;
if (kProtectFromSpace) {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+ from_space_->GetMemMap()->Protect(PROT_NONE);
} else {
- mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+ from_space_->GetMemMap()->Protect(PROT_READ);
}
if (saved_bytes_ > 0) {
VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e089ef2..89ded0b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -256,7 +256,7 @@ inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) co
// Zygote resulting in it being prematurely freed.
// We can only do this for primitive objects since large objects will not be within the card table
// range. This also means that we rely on SetClass not dirtying the object's card.
- return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
+ return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}
template <bool kGrow>
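A sketch of the gating this enables: large_object_threshold_ starts at SIZE_MAX, so nothing is treated as a large object until the zygote space exists (or the runtime is non-zygote), at which point it drops to the three-page default. Toy model only, not ART's Heap class:

```cpp
#include <cstddef>
#include <cstdio>
#include <limits>

// Toy stand-in for the relevant Heap state (names are the sketch's own).
struct ToyHeap {
  std::size_t large_object_threshold = std::numeric_limits<std::size_t>::max();
  bool ShouldAllocLargeObject(bool is_primitive_array, std::size_t bytes) const {
    return bytes >= large_object_threshold && is_primitive_array;
  }
  // Mirrors enabling the LOS after PreZygoteFork() / non-zygote startup.
  void EnableLargeObjects() { large_object_threshold = 3 * 4096; }
};

int main() {
  ToyHeap heap;
  // Disabled: even a 1 MiB primitive array stays in the main space.
  std::printf("%d\n", static_cast<int>(heap.ShouldAllocLargeObject(true, 1 << 20)));
  heap.EnableLargeObjects();
  std::printf("%d\n", static_cast<int>(heap.ShouldAllocLargeObject(true, 1 << 20)));
  return 0;
}
```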
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8d8cdd6..9ad21cf 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -98,6 +98,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
long_gc_log_threshold_(long_gc_log_threshold),
ignore_max_footprint_(ignore_max_footprint),
have_zygote_space_(false),
+ large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled.
soft_reference_queue_(this),
weak_reference_queue_(this),
finalizer_reference_queue_(this),
@@ -159,11 +160,16 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
- if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
+ if (!Runtime::Current()->IsZygote()) {
ChangeCollector(post_zygote_collector_type_);
+ large_object_threshold_ = kDefaultLargeObjectThreshold;
} else {
- // We are the zygote, use bump pointer allocation + semi space collector.
- ChangeCollector(kCollectorTypeSS);
+ if (kMovingCollector) {
+ // We are the zygote, use bump pointer allocation + semi space collector.
+ ChangeCollector(kCollectorTypeSS);
+ } else {
+ ChangeCollector(post_zygote_collector_type_);
+ }
}
live_bitmap_.reset(new accounting::HeapBitmap(this));
@@ -312,6 +318,91 @@ void Heap::ChangeAllocator(AllocatorType allocator) {
}
}
+std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
+ if (!IsValidContinuousSpaceObjectAddress(klass)) {
+ return StringPrintf("<non heap address klass %p>", klass);
+ }
+ mirror::Class* component_type = klass->GetComponentType<kVerifyNone>();
+ if (IsValidContinuousSpaceObjectAddress(component_type) && klass->IsArrayClass<kVerifyNone>()) {
+ std::string result("[");
+ result += SafeGetClassDescriptor(component_type);
+ return result;
+ } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
+ return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
+ } else if (UNLIKELY(klass->IsProxyClass())) {
+ return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
+ } else {
+ mirror::DexCache* dex_cache = klass->GetDexCache();
+ if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
+ return StringPrintf("<non heap address dex_cache %p>", dex_cache);
+ }
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ uint16_t class_def_idx = klass->GetDexClassDefIndex();
+ if (class_def_idx == DexFile::kDexNoIndex16) {
+ return "<class def not found>";
+ }
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ const DexFile::TypeId& type_id = dex_file->GetTypeId(class_def.class_idx_);
+ return dex_file->GetTypeDescriptor(type_id);
+ }
+}
+
+std::string Heap::SafePrettyTypeOf(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return "null";
+ }
+ mirror::Class* klass = obj->GetClass<kVerifyNone>();
+ if (klass == nullptr) {
+ return "(class=null)";
+ }
+ std::string result(SafeGetClassDescriptor(klass));
+ if (obj->IsClass()) {
+ result += "<" + SafeGetClassDescriptor(obj->AsClass()) + ">";
+ }
+ return result;
+}
+
+void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) {
+ if (obj == nullptr) {
+ stream << "(obj=null)";
+ return;
+ }
+ if (IsAligned<kObjectAlignment>(obj)) {
+ space::Space* space = nullptr;
+ // Don't use find space since it only finds spaces which actually contain objects instead of
+ // spaces which may contain objects (e.g. cleared bump pointer spaces).
+ for (const auto& cur_space : continuous_spaces_) {
+ if (cur_space->HasAddress(obj)) {
+ space = cur_space;
+ break;
+ }
+ }
+ if (space == nullptr) {
+ if (allocator_mem_map_.get() == nullptr || !allocator_mem_map_->HasAddress(obj)) {
+ stream << "obj " << obj << " not a valid heap address";
+ return;
+ } else if (allocator_mem_map_.get() != nullptr) {
+ allocator_mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+ // Unprotect all the spaces.
+ for (const auto& space : continuous_spaces_) {
+ mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE);
+ }
+ stream << "Object " << obj;
+ if (space != nullptr) {
+ stream << " in space " << *space;
+ }
+ mirror::Class* klass = obj->GetClass();
+ stream << "\nclass=" << klass;
+ if (klass != nullptr) {
+ stream << " type= " << SafePrettyTypeOf(obj);
+ }
+ // Re-protect the address we faulted on.
+ mprotect(AlignDown(obj, kPageSize), kPageSize, PROT_NONE);
+ }
+}
+
bool Heap::IsCompilingBoot() const {
for (const auto& space : continuous_spaces_) {
if (space->IsImageSpace() || space->IsZygoteSpace()) {
@@ -803,16 +894,23 @@ bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
if (obj == nullptr) {
return true;
}
- return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+ return IsAligned<kObjectAlignment>(obj) && FindSpaceFromObject(obj, true) != nullptr;
}
bool Heap::IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const {
return FindContinuousSpaceFromObject(obj, true) != nullptr;
}
-bool Heap::IsHeapAddress(const mirror::Object* obj) const {
- // TODO: This might not work for large objects.
- return FindSpaceFromObject(obj, true) != nullptr;
+bool Heap::IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const {
+ if (obj == nullptr || !IsAligned<kObjectAlignment>(obj)) {
+ return false;
+ }
+ for (const auto& space : continuous_spaces_) {
+ if (space->HasAddress(obj)) {
+ return true;
+ }
+ }
+ return false;
}
bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
@@ -1485,15 +1583,13 @@ void Heap::PreZygoteFork() {
main_space_->SetFootprintLimit(main_space_->Capacity());
AddSpace(main_space_);
have_zygote_space_ = true;
+ // Enable large object space allocations.
+ large_object_threshold_ = kDefaultLargeObjectThreshold;
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
AddModUnionTable(mod_union_table);
- // Reset the cumulative loggers since we now have a few additional timing phases.
- for (const auto& collector : garbage_collectors_) {
- collector->ResetCumulativeStatistics();
- }
// Can't use RosAlloc for non moving space due to thread local buffers.
// TODO: Non limited space for non-movable objects?
MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
@@ -1535,6 +1631,7 @@ void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap1,
void Heap::SwapSemiSpaces() {
// Swap the spaces so we allocate into the space which we just evacuated.
std::swap(bump_pointer_space_, temp_space_);
+ bump_pointer_space_->Clear();
}
void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -1612,7 +1709,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
CHECK(temp_space_->IsEmpty());
semi_space_collector_->SetFromSpace(bump_pointer_space_);
semi_space_collector_->SetToSpace(temp_space_);
- mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+ temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
collector = semi_space_collector_;
gc_type = collector::kGcTypeFull;
} else if (current_allocator_ == kAllocatorTypeRosAlloc ||
@@ -2049,7 +2146,8 @@ void Heap::ProcessCards(TimingLogger& timings) {
TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
- // TODO: Don't need to use atomic.
+ // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
+ // -> clean(cleaning thread).
// The races are that we end up with either an aged card or an unaged card. Since we run the
// checkpoint roots first and scan / update the mod union tables afterwards, we will always scan
// either card. If we end up with the non-aged card, we scan it in the pause.
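DumpObject above unprotects the spaces so it can safely inspect a possibly-faulting address, then re-protects just the page that faulted. In isolation, the align-down-and-reprotect step looks roughly like this (a 4096-byte kPageSize is an assumption of the sketch):

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kPageSize = 4096;  // assumption for the sketch

// Round a pointer down to its enclosing page boundary.
template <typename T>
static T* AlignDown(T* p, std::uintptr_t alignment) {
  return reinterpret_cast<T*>(
      reinterpret_cast<std::uintptr_t>(p) & ~(alignment - 1));
}

int main() {
  void* raw = mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return 1;
  char* fault_addr = static_cast<char*>(raw) + 123;  // "faulted" mid-page
  // Re-protect the whole page containing the faulting address.
  mprotect(AlignDown(fault_addr, kPageSize), kPageSize, PROT_NONE);
  std::printf("re-protected page at %p\n", raw);
  munmap(raw, kPageSize);
  return 0;
}
```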
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5d3232f..b194d8d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -119,7 +119,7 @@ class Heap {
// If true, measure the total allocation time.
static constexpr bool kMeasureAllocationTime = false;
// Primitive arrays larger than this size are put in the large object space.
- static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+ static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
static constexpr size_t kDefaultInitialSize = 2 * MB;
static constexpr size_t kDefaultMaximumSize = 32 * MB;
@@ -223,9 +223,6 @@ class Heap {
bool IsValidObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns true if the address passed in is a heap address, doesn't need to be aligned.
- bool IsHeapAddress(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
@@ -519,6 +516,12 @@ class Heap {
void DumpSpaces(std::ostream& stream = LOG(INFO));
+ // DumpObject should only be used by the signal handler.
+ void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ // Safe versions of PrettyTypeOf and class-descriptor lookup which check that objects are valid heap addresses.
+ std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
+ std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
@@ -600,6 +603,10 @@ class Heap {
template <bool kGrow>
bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+ // Returns true if the address passed in is within the address range of a continuous space.
+ bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Pushes a list of cleared references out to the managed heap.
void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -743,6 +750,9 @@ class Heap {
// If we have a zygote space.
bool have_zygote_space_;
+ // Minimum allocation size of a large object.
+ size_t large_object_threshold_;
+
// Guards access to the state of GC, associated conditional variable is used to signal when a GC
// completes.
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 43674ea..fcd3b70 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -61,6 +61,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
void BumpPointerSpace::Clear() {
// Release the pages back to the operating system.
CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+}
+
+void BumpPointerSpace::Reset() {
// Reset the end of the space back to the beginning; we move the end forward as we allocate
// objects.
SetEnd(Begin());
@@ -75,8 +78,9 @@ void BumpPointerSpace::Clear() {
}
void BumpPointerSpace::Dump(std::ostream& os) const {
- os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
- << reinterpret_cast<void*>(Limit());
+ os << GetName() << " "
+ << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+ << reinterpret_cast<void*>(Limit());
}
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 476b833..2c9d35f 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -92,8 +92,11 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
return nullptr;
}
- // Clear the memory and reset the pointer to the start of the space.
- void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_);
+ // Madvise the memory back to the OS.
+ void Clear() OVERRIDE;
+
+ // Reset the pointer to the start of the space.
+ void Reset() OVERRIDE LOCKS_EXCLUDED(block_lock_);
void Dump(std::ostream& os) const;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index caedaaf..b591486 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -281,12 +281,15 @@ uint64_t DlMallocSpace::GetObjectsAllocated() {
}
void DlMallocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void DlMallocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
lock_.AssertHeld(Thread::Current());
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ea10ad..4bf16ce 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -113,6 +113,7 @@ class DlMallocSpace : public MallocSpace {
uint64_t GetObjectsAllocated() OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
bool IsDlMallocSpace() const OVERRIDE {
return true;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index fe8421d..fb621ea 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -304,12 +304,15 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
}
void RosAllocSpace::Clear() {
- // TODO: Delete and create new mspace here.
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
GetLiveBitmap()->Clear();
GetMarkBitmap()->Clear();
}
+void RosAllocSpace::Reset() {
+ // TODO: Delete and create new mspace here.
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index bd32196..5bc425d 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -80,6 +80,7 @@ class RosAllocSpace : public MallocSpace {
void SetFootprintLimit(size_t limit) OVERRIDE;
void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
byte* begin, byte* end, byte* limit, size_t growth_limit);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 0f8f38a..37d7c80 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -399,6 +399,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Free all memory associated with this space.
virtual void Clear() = 0;
+ // Reset the space back to an empty space.
+ virtual void Reset() = 0;
+
accounting::SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a60ab38..d1c3d03 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -61,6 +61,10 @@ void ZygoteSpace::Clear() {
LOG(FATAL) << "Unimplemented";
}
+void ZygoteSpace::Reset() {
+ LOG(FATAL) << "Unimplemented";
+}
+
ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyFullCollect),
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 8cd1a9f..8880548 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -71,7 +71,8 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return objects_allocated_;
}
- void Clear();
+ void Clear() OVERRIDE;
+ void Reset() OVERRIDE;
protected:
virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 37fb2db..2db0f5f 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -26,7 +26,6 @@
#include "base/logging.h"
#include "base/mutex.h"
#include "base/stl_util.h"
-#include "base/stringpiece.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 8158bc5..1d37775 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -141,6 +141,7 @@ inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_c
allocator_type, visitor));
}
if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
+ array_class = result->GetClass(); // In case the array class moved.
CHECK_EQ(array_class->GetComponentSize(), component_size);
if (!fill_usable) {
CHECK_EQ(result->SizeOf(), size);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1ef15f7..90ba7d3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -91,6 +91,8 @@ Runtime::Runtime()
resolution_method_(nullptr),
imt_conflict_method_(nullptr),
default_imt_(nullptr),
+ fault_message_lock_("Fault message lock"),
+ fault_message_(""),
method_verifiers_lock_("Method verifiers lock"),
threads_being_born_(0),
shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
@@ -1598,4 +1600,9 @@ void Runtime::RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) con
DCHECK(IsActiveTransaction());
preinitialization_transaction->RecordWeakStringRemoval(s, hash_code);
}
+
+void Runtime::SetFaultMessage(const std::string& message) {
+ MutexLock mu(Thread::Current(), fault_message_lock_);
+ fault_message_ = message;
+}
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 8924921..249bb45 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -446,6 +446,13 @@ class Runtime {
void RecordWeakStringRemoval(mirror::String* s, uint32_t hash_code) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ void SetFaultMessage(const std::string& message);
+ // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
+ // with the unexpected_signal_lock_.
+ const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
+ return fault_message_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -520,6 +527,10 @@ class Runtime {
mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ // Fault message, printed when we get a SIGSEGV.
+ Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::string fault_message_ GUARDED_BY(fault_message_lock_);
+
// Method verifier set, used so that we can update their GC roots.
Mutex method_verifiers_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::set<verifier::MethodVerifier*> method_verifiers_;
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 2013294..14e5574 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,12 +14,58 @@
* limitations under the License.
*/
-#include "runtime.h"
+#include <signal.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <inttypes.h>
+
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "base/stringprintf.h"
+#include "thread-inl.h"
+#include "utils.h"
namespace art {
+struct sigaction old_action;
+void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
+ static bool handlingUnexpectedSignal = false;
+ if (handlingUnexpectedSignal) {
+ LogMessageData data(__FILE__, __LINE__, INTERNAL_FATAL, -1);
+ LogMessage::LogLine(data, "HandleUnexpectedSignal reentered\n");
+ _exit(1);
+ }
+ handlingUnexpectedSignal = true;
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ // Print this out first in case DumpObject faults.
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ gc::Heap* heap = runtime->GetHeap();
+ if (heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
+ // Run the old signal handler.
+ old_action.sa_sigaction(signal_number, info, raw_context);
+}
+
void Runtime::InitPlatformSignalHandlers() {
- // On a device, debuggerd will give us a stack trace. Nothing to do here.
+ // Install our handler ahead of debuggerd's so we can dump the fault message and the heap
+ // object at the fault address before chaining to the old handler.
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = HandleUnexpectedSignal;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+ int rc = 0;
+ rc += sigaction(SIGSEGV, &action, &old_action);
+ CHECK_EQ(rc, 0);
}
} // namespace art
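A self-contained sketch of the save-and-chain sigaction pattern used above, substituting SIGUSR1 for SIGSEGV so the demo exits cleanly; the handler and variable names are local to the sketch:

```cpp
#include <csignal>
#include <cstdio>
#include <cstring>
#include <unistd.h>

static struct sigaction old_action;

static void Handler(int sig, siginfo_t* info, void* ctx) {
  const char msg[] = "our handler ran first\n";
  write(STDOUT_FILENO, msg, sizeof(msg) - 1);  // async-signal-safe output
  // Chain to whatever handler was installed before ours.
  if (old_action.sa_flags & SA_SIGINFO) {
    old_action.sa_sigaction(sig, info, ctx);
  } else if (old_action.sa_handler != SIG_IGN &&
             old_action.sa_handler != SIG_DFL) {
    old_action.sa_handler(sig);
  }
}

int main() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_sigaction = Handler;
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;  // as in the patch above
  sigaction(SIGUSR1, &action, &old_action);
  raise(SIGUSR1);
  std::puts("back in main");
  return 0;
}
```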
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 73ac034..4a166d7 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -305,7 +305,15 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
<< "Thread: " << tid << " \"" << thread_name << "\"\n"
<< "Registers:\n" << Dumpable<UContext>(thread_context) << "\n"
<< "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace);
-
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ gc::Heap* heap = runtime->GetHeap();
+ LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ if (heap != nullptr && info != nullptr) {
+ LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ }
+ }
if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
diff --git a/runtime/thread.h b/runtime/thread.h
index f9d31af..fcae9e4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,12 @@
namespace art {
+namespace gc {
+namespace collector {
+class SemiSpace;
+} // namespace collector
+} // namespace gc
+
namespace mirror {
class ArtMethod;
class Array;
@@ -851,6 +857,7 @@ class PACKED(4) Thread {
private:
friend class Dbg; // For SetStateUnsafe.
+ friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Monitor;
friend class MonitorInfo;
friend class Runtime; // For CreatePeer.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index df1ab94..d8f8f8f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1232,8 +1232,8 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
execv(program, &args[0]);
- PLOG(FATAL) << "Failed to execv(" << command_line << ")";
- return false;
+ PLOG(ERROR) << "Failed to execv(" << command_line << ")";
+ exit(1);
} else {
if (pid == -1) {
*error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
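The PLOG(FATAL) to exit(1) change matters because a forked child whose execv fails must terminate itself; returning (or aborting through the parent's fatal-log path) would leave two processes running the caller's logic. A minimal illustration of the pattern:

```cpp
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main() {
  pid_t pid = fork();
  if (pid == 0) {
    // Child: exec a binary that (presumably) does not exist.
    execlp("definitely-not-a-real-binary", "x", static_cast<char*>(nullptr));
    // Only reached if exec failed; terminate instead of falling through,
    // otherwise two processes would keep running the caller's code.
    std::perror("exec failed");
    _exit(1);
  }
  int status = 0;
  waitpid(pid, &status, 0);
  std::printf("child exited with %d\n", WEXITSTATUS(status));
  return 0;
}
```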
diff --git a/runtime/utils.h b/runtime/utils.h
index 0bb06de..bcbeb0e 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -23,7 +23,6 @@
#include <vector>
#include "base/logging.h"
-#include "base/stringpiece.h"
#include "base/stringprintf.h"
#include "globals.h"
#include "primitive.h"
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index ffa8b9e..4d11283 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -20,7 +20,6 @@
#include "base/logging.h"
#include "base/mutex-inl.h"
-#include "base/stringpiece.h"
#include "class_linker.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 1f48e0a..2169fe0 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -22,7 +22,6 @@
#include <ziparchive/zip_archive.h>
#include "base/logging.h"
-#include "base/stringpiece.h"
#include "base/unix_file/random_access_file.h"
#include "globals.h"
#include "mem_map.h"